tag | line | file | source code |
--- | --- | --- | --- |
vma | 164 | arch/alpha/kernel/ptrace.c | struct vm_area_struct * vma, unsigned long addr) |
vma | 173 | arch/alpha/kernel/ptrace.c | pgdir = pgd_offset(vma->vm_mm, addr); |
vma | 175 | arch/alpha/kernel/ptrace.c | do_no_page(tsk, vma, addr, 0); |
vma | 185 | arch/alpha/kernel/ptrace.c | do_no_page(tsk, vma, addr, 0); |
vma | 195 | arch/alpha/kernel/ptrace.c | do_no_page(tsk, vma, addr, 0); |
vma | 215 | arch/alpha/kernel/ptrace.c | static void put_long(struct task_struct * tsk, struct vm_area_struct * vma, |
vma | 224 | arch/alpha/kernel/ptrace.c | pgdir = pgd_offset(vma->vm_mm, addr); |
vma | 226 | arch/alpha/kernel/ptrace.c | do_no_page(tsk, vma, addr, 1); |
vma | 236 | arch/alpha/kernel/ptrace.c | do_no_page(tsk, vma, addr, 1); |
vma | 246 | arch/alpha/kernel/ptrace.c | do_no_page(tsk, vma, addr, 1); |
vma | 251 | arch/alpha/kernel/ptrace.c | do_wp_page(tsk, vma, addr, 1); |
vma | 259 | arch/alpha/kernel/ptrace.c | set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot))); |
vma | 266 | arch/alpha/kernel/ptrace.c | struct vm_area_struct * vma; |
vma | 269 | arch/alpha/kernel/ptrace.c | vma = find_vma(tsk,addr); |
vma | 270 | arch/alpha/kernel/ptrace.c | if (!vma) |
vma | 272 | arch/alpha/kernel/ptrace.c | if (vma->vm_start <= addr) |
vma | 273 | arch/alpha/kernel/ptrace.c | return vma; |
vma | 274 | arch/alpha/kernel/ptrace.c | if (!(vma->vm_flags & VM_GROWSDOWN)) |
vma | 276 | arch/alpha/kernel/ptrace.c | if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur) |
vma | 278 | arch/alpha/kernel/ptrace.c | vma->vm_offset -= vma->vm_start - addr; |
vma | 279 | arch/alpha/kernel/ptrace.c | vma->vm_start = addr; |
vma | 280 | arch/alpha/kernel/ptrace.c | return vma; |
vma | 290 | arch/alpha/kernel/ptrace.c | struct vm_area_struct * vma = find_extend_vma(tsk, addr); |
vma | 293 | arch/alpha/kernel/ptrace.c | if (!vma) { |
vma | 298 | arch/alpha/kernel/ptrace.c | struct vm_area_struct * vma_high = vma; |
vma | 301 | arch/alpha/kernel/ptrace.c | if (addr + sizeof(long) >= vma->vm_end) { |
vma | 302 | arch/alpha/kernel/ptrace.c | vma_high = vma->vm_next; |
vma | 303 | arch/alpha/kernel/ptrace.c | if (!vma_high || vma_high->vm_start != vma->vm_end) |
vma | 308 | arch/alpha/kernel/ptrace.c | low = get_long(tsk, vma, addr); |
vma | 318 | arch/alpha/kernel/ptrace.c | long l = get_long(tsk, vma, addr); |
vma | 333 | arch/alpha/kernel/ptrace.c | struct vm_area_struct * vma = find_extend_vma(tsk, addr); |
vma | 335 | arch/alpha/kernel/ptrace.c | if (!vma) |
vma | 339 | arch/alpha/kernel/ptrace.c | struct vm_area_struct * vma_high = vma; |
vma | 341 | arch/alpha/kernel/ptrace.c | if (addr + sizeof(long) >= vma->vm_end) { |
vma | 342 | arch/alpha/kernel/ptrace.c | vma_high = vma->vm_next; |
vma | 343 | arch/alpha/kernel/ptrace.c | if (!vma_high || vma_high->vm_start != vma->vm_end) |
vma | 348 | arch/alpha/kernel/ptrace.c | low = get_long(tsk, vma, addr); |
vma | 354 | arch/alpha/kernel/ptrace.c | put_long(tsk, vma, addr, low); |
vma | 357 | arch/alpha/kernel/ptrace.c | put_long(tsk, vma, addr, data); |
vma | 60 | arch/alpha/mm/fault.c | struct vm_area_struct * vma; |
vma | 62 | arch/alpha/mm/fault.c | vma = find_vma(current, address); |
vma | 63 | arch/alpha/mm/fault.c | if (!vma) |
vma | 65 | arch/alpha/mm/fault.c | if (vma->vm_start <= address) |
vma | 67 | arch/alpha/mm/fault.c | if (!(vma->vm_flags & VM_GROWSDOWN)) |
vma | 69 | arch/alpha/mm/fault.c | if (expand_stack(vma, address)) |
vma | 77 | arch/alpha/mm/fault.c | if (!(vma->vm_flags & VM_EXEC)) |
vma | 81 | arch/alpha/mm/fault.c | if (!(vma->vm_flags & (VM_READ | VM_WRITE))) |
vma | 84 | arch/alpha/mm/fault.c | if (!(vma->vm_flags & VM_WRITE)) |
vma | 88 | arch/alpha/mm/fault.c | handle_mm_fault(vma, address, cause > 0); |
vma | 87 | arch/i386/kernel/ptrace.c | struct vm_area_struct * vma, unsigned long addr) |
vma | 95 | arch/i386/kernel/ptrace.c | pgdir = pgd_offset(vma->vm_mm, addr); |
vma | 97 | arch/i386/kernel/ptrace.c | do_no_page(tsk, vma, addr, 0); |
vma | 107 | arch/i386/kernel/ptrace.c | do_no_page(tsk, vma, addr, 0); |
vma | 117 | arch/i386/kernel/ptrace.c | do_no_page(tsk, vma, addr, 0); |
vma | 137 | arch/i386/kernel/ptrace.c | static void put_long(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long addr, |
vma | 146 | arch/i386/kernel/ptrace.c | pgdir = pgd_offset(vma->vm_mm, addr); |
vma | 148 | arch/i386/kernel/ptrace.c | do_no_page(tsk, vma, addr, 1); |
vma | 158 | arch/i386/kernel/ptrace.c | do_no_page(tsk, vma, addr, 1); |
vma | 168 | arch/i386/kernel/ptrace.c | do_no_page(tsk, vma, addr, 1); |
vma | 173 | arch/i386/kernel/ptrace.c | do_wp_page(tsk, vma, addr, 1); |
vma | 181 | arch/i386/kernel/ptrace.c | set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot))); |
vma | 187 | arch/i386/kernel/ptrace.c | struct vm_area_struct * vma; |
vma | 190 | arch/i386/kernel/ptrace.c | vma = find_vma(tsk,addr); |
vma | 191 | arch/i386/kernel/ptrace.c | if (!vma) |
vma | 193 | arch/i386/kernel/ptrace.c | if (vma->vm_start <= addr) |
vma | 194 | arch/i386/kernel/ptrace.c | return vma; |
vma | 195 | arch/i386/kernel/ptrace.c | if (!(vma->vm_flags & VM_GROWSDOWN)) |
vma | 197 | arch/i386/kernel/ptrace.c | if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur) |
vma | 199 | arch/i386/kernel/ptrace.c | vma->vm_offset -= vma->vm_start - addr; |
vma | 200 | arch/i386/kernel/ptrace.c | vma->vm_start = addr; |
vma | 201 | arch/i386/kernel/ptrace.c | return vma; |
vma | 211 | arch/i386/kernel/ptrace.c | struct vm_area_struct * vma = find_extend_vma(tsk, addr); |
vma | 213 | arch/i386/kernel/ptrace.c | if (!vma) |
vma | 217 | arch/i386/kernel/ptrace.c | struct vm_area_struct * vma_high = vma; |
vma | 219 | arch/i386/kernel/ptrace.c | if (addr + sizeof(long) >= vma->vm_end) { |
vma | 220 | arch/i386/kernel/ptrace.c | vma_high = vma->vm_next; |
vma | 221 | arch/i386/kernel/ptrace.c | if (!vma_high || vma_high->vm_start != vma->vm_end) |
vma | 224 | arch/i386/kernel/ptrace.c | low = get_long(tsk, vma, addr & ~(sizeof(long)-1)); |
vma | 242 | arch/i386/kernel/ptrace.c | *result = get_long(tsk, vma, addr); |
vma | 253 | arch/i386/kernel/ptrace.c | struct vm_area_struct * vma = find_extend_vma(tsk, addr); |
vma | 255 | arch/i386/kernel/ptrace.c | if (!vma) |
vma | 259 | arch/i386/kernel/ptrace.c | struct vm_area_struct * vma_high = vma; |
vma | 261 | arch/i386/kernel/ptrace.c | if (addr + sizeof(long) >= vma->vm_end) { |
vma | 262 | arch/i386/kernel/ptrace.c | vma_high = vma->vm_next; |
vma | 263 | arch/i386/kernel/ptrace.c | if (!vma_high || vma_high->vm_start != vma->vm_end) |
vma | 266 | arch/i386/kernel/ptrace.c | low = get_long(tsk, vma, addr & ~(sizeof(long)-1)); |
vma | 291 | arch/i386/kernel/ptrace.c | put_long(tsk, vma, addr & ~(sizeof(long)-1),low); |
vma | 294 | arch/i386/kernel/ptrace.c | put_long(tsk, vma, addr, data); |
vma | 36 | arch/i386/mm/fault.c | struct vm_area_struct * vma; |
vma | 42 | arch/i386/mm/fault.c | vma = find_vma(current, address); |
vma | 43 | arch/i386/mm/fault.c | if (!vma) |
vma | 45 | arch/i386/mm/fault.c | if (vma->vm_start <= address) |
vma | 47 | arch/i386/mm/fault.c | if (!(vma->vm_flags & VM_GROWSDOWN)) |
vma | 59 | arch/i386/mm/fault.c | if (expand_stack(vma, address)) |
vma | 70 | arch/i386/mm/fault.c | if (!(vma->vm_flags & VM_WRITE)) |
vma | 76 | arch/i386/mm/fault.c | if (!(vma->vm_flags & (VM_READ | VM_EXEC))) |
vma | 92 | arch/i386/mm/fault.c | do_wp_page(current, vma, address, error_code & 2); |
vma | 95 | arch/i386/mm/fault.c | do_no_page(current, vma, address, error_code & 2); |
vma | 107 | arch/m68k/kernel/ptrace.c | struct vm_area_struct * vma, unsigned long addr) |
vma | 115 | arch/m68k/kernel/ptrace.c | pgdir = pgd_offset(vma->vm_mm, addr); |
vma | 117 | arch/m68k/kernel/ptrace.c | do_no_page(tsk, vma, addr, 0); |
vma | 127 | arch/m68k/kernel/ptrace.c | do_no_page(tsk, vma, addr, 0); |
vma | 138 | arch/m68k/kernel/ptrace.c | do_no_page(tsk, vma, addr, 0); |
vma | 158 | arch/m68k/kernel/ptrace.c | static void put_long(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long addr, |
vma | 167 | arch/m68k/kernel/ptrace.c | pgdir = pgd_offset(vma->vm_mm, addr); |
vma | 169 | arch/m68k/kernel/ptrace.c | do_no_page(tsk, vma, addr, 1); |
vma | 179 | arch/m68k/kernel/ptrace.c | do_no_page(tsk, vma, addr, 1); |
vma | 190 | arch/m68k/kernel/ptrace.c | do_no_page(tsk, vma, addr, 1); |
vma | 195 | arch/m68k/kernel/ptrace.c | do_wp_page(tsk, vma, addr, 2); |
vma | 204 | arch/m68k/kernel/ptrace.c | *pgtable = pte_mkdirty(mk_pte(page, vma->vm_page_prot)); |
vma | 210 | arch/m68k/kernel/ptrace.c | struct vm_area_struct * vma; |
vma | 213 | arch/m68k/kernel/ptrace.c | vma = find_vma(tsk,addr); |
vma | 214 | arch/m68k/kernel/ptrace.c | if (!vma) |
vma | 216 | arch/m68k/kernel/ptrace.c | if (vma->vm_start <= addr) |
vma | 217 | arch/m68k/kernel/ptrace.c | return vma; |
vma | 218 | arch/m68k/kernel/ptrace.c | if (!(vma->vm_flags & VM_GROWSDOWN)) |
vma | 220 | arch/m68k/kernel/ptrace.c | if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur) |
vma | 222 | arch/m68k/kernel/ptrace.c | vma->vm_offset -= vma->vm_start - addr; |
vma | 223 | arch/m68k/kernel/ptrace.c | vma->vm_start = addr; |
vma | 224 | arch/m68k/kernel/ptrace.c | return vma; |
vma | 234 | arch/m68k/kernel/ptrace.c | struct vm_area_struct * vma = find_extend_vma(tsk, addr); |
vma | 236 | arch/m68k/kernel/ptrace.c | if (!vma) |
vma | 240 | arch/m68k/kernel/ptrace.c | struct vm_area_struct * vma_low = vma; |
vma | 242 | arch/m68k/kernel/ptrace.c | if (addr + sizeof(long) >= vma->vm_end) { |
vma | 243 | arch/m68k/kernel/ptrace.c | vma_low = vma->vm_next; |
vma | 244 | arch/m68k/kernel/ptrace.c | if (!vma_low || vma_low->vm_start != vma->vm_end) |
vma | 247 | arch/m68k/kernel/ptrace.c | high = get_long(tsk, vma,addr & ~(sizeof(long)-1)); |
vma | 265 | arch/m68k/kernel/ptrace.c | *result = get_long(tsk, vma,addr); |
vma | 276 | arch/m68k/kernel/ptrace.c | struct vm_area_struct * vma = find_extend_vma(tsk, addr); |
vma | 278 | arch/m68k/kernel/ptrace.c | if (!vma) |
vma | 282 | arch/m68k/kernel/ptrace.c | struct vm_area_struct * vma_low = vma; |
vma | 284 | arch/m68k/kernel/ptrace.c | if (addr + sizeof(long) >= vma->vm_end) { |
vma | 285 | arch/m68k/kernel/ptrace.c | vma_low = vma->vm_next; |
vma | 286 | arch/m68k/kernel/ptrace.c | if (!vma_low || vma_low->vm_start != vma->vm_end) |
vma | 289 | arch/m68k/kernel/ptrace.c | high = get_long(tsk, vma,addr & ~(sizeof(long)-1)); |
vma | 314 | arch/m68k/kernel/ptrace.c | put_long(tsk, vma,addr & ~(sizeof(long)-1),high); |
vma | 317 | arch/m68k/kernel/ptrace.c | put_long(tsk, vma,addr,data); |
vma | 31 | arch/m68k/mm/fault.c | struct vm_area_struct * vma; |
vma | 39 | arch/m68k/mm/fault.c | vma = find_vma(current, address); |
vma | 40 | arch/m68k/mm/fault.c | if (!vma) |
vma | 42 | arch/m68k/mm/fault.c | if (vma->vm_start <= address) |
vma | 44 | arch/m68k/mm/fault.c | if (!(vma->vm_flags & VM_GROWSDOWN)) |
vma | 54 | arch/m68k/mm/fault.c | if (expand_stack(vma, address)) |
vma | 66 | arch/m68k/mm/fault.c | if (!(vma->vm_flags & VM_WRITE)) |
vma | 72 | arch/m68k/mm/fault.c | if (!(vma->vm_flags & (VM_READ | VM_EXEC))) |
vma | 76 | arch/m68k/mm/fault.c | do_wp_page(current, vma, address, error_code & 2); |
vma | 79 | arch/m68k/mm/fault.c | do_no_page(current, vma, address, error_code & 2); |
vma | 86 | arch/mips/kernel/ptrace.c | static unsigned long get_long(struct vm_area_struct * vma, unsigned long addr) |
vma | 93 | arch/mips/kernel/ptrace.c | pgdir = PAGE_DIR_OFFSET(vma->vm_mm, addr); |
vma | 95 | arch/mips/kernel/ptrace.c | do_no_page(vma, addr, 0); |
vma | 105 | arch/mips/kernel/ptrace.c | do_no_page(vma, addr, 0); |
vma | 125 | arch/mips/kernel/ptrace.c | static void put_long(struct vm_area_struct * vma, unsigned long addr, |
vma | 133 | arch/mips/kernel/ptrace.c | pgdir = PAGE_DIR_OFFSET(vma->vm_mm, addr); |
vma | 135 | arch/mips/kernel/ptrace.c | do_no_page(vma, addr, 1); |
vma | 145 | arch/mips/kernel/ptrace.c | do_no_page(vma, addr, 1); |
vma | 150 | arch/mips/kernel/ptrace.c | do_wp_page(vma, addr, 1); |
vma | 158 | arch/mips/kernel/ptrace.c | set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot))); |
vma | 164 | arch/mips/kernel/ptrace.c | struct vm_area_struct * vma; |
vma | 167 | arch/mips/kernel/ptrace.c | vma = find_vma(tsk, addr); |
vma | 168 | arch/mips/kernel/ptrace.c | if (!vma) |
vma | 170 | arch/mips/kernel/ptrace.c | if (vma->vm_start <= addr) |
vma | 171 | arch/mips/kernel/ptrace.c | return vma; |
vma | 172 | arch/mips/kernel/ptrace.c | if (!(vma->vm_flags & VM_GROWSDOWN)) |
vma | 174 | arch/mips/kernel/ptrace.c | if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur) |
vma | 176 | arch/mips/kernel/ptrace.c | vma->vm_offset -= vma->vm_start - addr; |
vma | 177 | arch/mips/kernel/ptrace.c | vma->vm_start = addr; |
vma | 178 | arch/mips/kernel/ptrace.c | return vma; |
vma | 188 | arch/mips/kernel/ptrace.c | struct vm_area_struct * vma = find_extend_vma(tsk, addr); |
vma | 190 | arch/mips/kernel/ptrace.c | if (!vma) |
vma | 194 | arch/mips/kernel/ptrace.c | struct vm_area_struct * vma_high = vma; |
vma | 196 | arch/mips/kernel/ptrace.c | if (addr + sizeof(long) >= vma->vm_end) { |
vma | 197 | arch/mips/kernel/ptrace.c | vma_high = vma->vm_next; |
vma | 198 | arch/mips/kernel/ptrace.c | if (!vma_high || vma_high->vm_start != vma->vm_end) |
vma | 201 | arch/mips/kernel/ptrace.c | low = get_long(vma, addr & ~(sizeof(long)-1)); |
vma | 219 | arch/mips/kernel/ptrace.c | *result = get_long(vma, addr); |
vma | 230 | arch/mips/kernel/ptrace.c | struct vm_area_struct * vma = find_extend_vma(tsk, addr); |
vma | 232 | arch/mips/kernel/ptrace.c | if (!vma) |
vma | 236 | arch/mips/kernel/ptrace.c | struct vm_area_struct * vma_high = vma; |
vma | 238 | arch/mips/kernel/ptrace.c | if (addr + sizeof(long) >= vma->vm_end) { |
vma | 239 | arch/mips/kernel/ptrace.c | vma_high = vma->vm_next; |
vma | 240 | arch/mips/kernel/ptrace.c | if (!vma_high || vma_high->vm_start != vma->vm_end) |
vma | 243 | arch/mips/kernel/ptrace.c | low = get_long(vma, addr & ~(sizeof(long)-1)); |
vma | 268 | arch/mips/kernel/ptrace.c | put_long(vma, addr & ~(sizeof(long)-1),low); |
vma | 271 | arch/mips/kernel/ptrace.c | put_long(vma, addr, data); |
vma | 36 | arch/mips/kernel/sysmips.c | struct vm_area_struct * vma; |
vma | 38 | arch/mips/kernel/sysmips.c | vma = find_vma(current, address); |
vma | 39 | arch/mips/kernel/sysmips.c | if (!vma || vma->vm_start > address || !(vma->vm_flags & VM_READ)) |
vma | 41 | arch/mips/kernel/sysmips.c | address = vma->vm_end - address; |
vma | 44 | arch/mips/kernel/sysmips.c | if (vma->vm_next && vma->vm_next->vm_start == vma->vm_end && |
vma | 45 | arch/mips/kernel/sysmips.c | (vma->vm_next->vm_flags & VM_READ)) |
vma | 31 | arch/mips/mm/fault.c | struct vm_area_struct * vma; |
vma | 38 | arch/mips/mm/fault.c | vma = find_vma(current, address); |
vma | 39 | arch/mips/mm/fault.c | if (!vma) |
vma | 41 | arch/mips/mm/fault.c | if (vma->vm_start <= address) |
vma | 43 | arch/mips/mm/fault.c | if (!(vma->vm_flags & VM_GROWSDOWN)) |
vma | 45 | arch/mips/mm/fault.c | if (expand_stack(vma, address)) |
vma | 53 | arch/mips/mm/fault.c | if (!(vma->vm_flags & VM_WRITE)) |
vma | 56 | arch/mips/mm/fault.c | if (!(vma->vm_flags & (VM_READ | VM_EXEC))) |
vma | 59 | arch/mips/mm/fault.c | handle_mm_fault(vma, address, writeaccess); |
vma | 185 | arch/ppc/mm/fault.c | struct vm_area_struct * vma; |
vma | 190 | arch/ppc/mm/fault.c | for (vma = current->mm->mmap ; ; vma = vma->vm_next) |
vma | 192 | arch/ppc/mm/fault.c | if (!vma) |
vma | 198 | arch/ppc/mm/fault.c | if (vma->vm_end > address) |
vma | 202 | arch/ppc/mm/fault.c | vma = find_vma(current, address); |
vma | 203 | arch/ppc/mm/fault.c | if (!vma) |
vma | 208 | arch/ppc/mm/fault.c | if (vma->vm_start <= address){ |
vma | 211 | arch/ppc/mm/fault.c | if (!(vma->vm_flags & VM_GROWSDOWN)) |
vma | 217 | arch/ppc/mm/fault.c | if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur) |
vma | 219 | arch/ppc/mm/fault.c | printk("stack2: vma->vm_end-address %x rlim %x\n", vma->vm_end - address, |
vma | 221 | arch/ppc/mm/fault.c | printk("stack2: vm_end %x address = %x\n", vma->vm_end,address); |
vma | 226 | arch/ppc/mm/fault.c | vma->vm_offset -= vma->vm_start - (address & PAGE_MASK); |
vma | 227 | arch/ppc/mm/fault.c | vma->vm_start = (address & PAGE_MASK); |
vma | 238 | arch/ppc/mm/fault.c | if (!(vma->vm_flags & VM_WRITE)) |
vma | 242 | arch/ppc/mm/fault.c | current,address,vma->vm_flags,current->mm,vma,vma->vm_start,vma->vm_end); |
vma | 252 | arch/ppc/mm/fault.c | if (!(vma->vm_flags & (VM_READ | VM_EXEC))) |
vma | 255 | arch/ppc/mm/fault.c | _printk("vma = %x\n", vma); |
vma | 256 | arch/ppc/mm/fault.c | _printk("vma->vm_flags = %x\n", vma->vm_flags); |
vma | 261 | arch/ppc/mm/fault.c | vma, VM_READ,VM_EXEC); |
vma | 263 | arch/ppc/mm/fault.c | vma->vm_start, vma->vm_end); |
vma | 266 | arch/ppc/mm/fault.c | printk("vma->vm_flags = %x\n", vma->vm_flags); |
vma | 273 | arch/ppc/mm/fault.c | handle_mm_fault(vma, address, error_code & 2); |
vma | 296 | arch/ppc/mm/fault.c | current,address,vma->vm_flags,current->mm,vma,vma->vm_start,vma->vm_end); |
vma | 38 | arch/sparc/kernel/ptrace.c | struct vm_area_struct * vma, unsigned long addr) |
vma | 46 | arch/sparc/kernel/ptrace.c | pgdir = pgd_offset(vma->vm_mm, addr); |
vma | 48 | arch/sparc/kernel/ptrace.c | do_no_page(tsk, vma, addr, 0); |
vma | 58 | arch/sparc/kernel/ptrace.c | do_no_page(tsk, vma, addr, 0); |
vma | 68 | arch/sparc/kernel/ptrace.c | do_no_page(tsk, vma, addr, 0); |
vma | 90 | arch/sparc/kernel/ptrace.c | static void put_long(struct task_struct * tsk, struct vm_area_struct * vma, |
vma | 99 | arch/sparc/kernel/ptrace.c | pgdir = pgd_offset(vma->vm_mm, addr); |
vma | 101 | arch/sparc/kernel/ptrace.c | do_no_page(tsk, vma, addr, 1); |
vma | 111 | arch/sparc/kernel/ptrace.c | do_no_page(tsk, vma, addr, 1); |
vma | 121 | arch/sparc/kernel/ptrace.c | do_no_page(tsk, vma, addr, 1); |
vma | 126 | arch/sparc/kernel/ptrace.c | do_wp_page(tsk, vma, addr, 1); |
vma | 130 | arch/sparc/kernel/ptrace.c | flush_cache_page(vma, page); |
vma | 137 | arch/sparc/kernel/ptrace.c | set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot))); |
vma | 138 | arch/sparc/kernel/ptrace.c | flush_tlb_page(vma, page); |
vma | 144 | arch/sparc/kernel/ptrace.c | struct vm_area_struct * vma; |
vma | 147 | arch/sparc/kernel/ptrace.c | vma = find_vma(tsk,addr); |
vma | 148 | arch/sparc/kernel/ptrace.c | if (!vma) |
vma | 150 | arch/sparc/kernel/ptrace.c | if (vma->vm_start <= addr) |
vma | 151 | arch/sparc/kernel/ptrace.c | return vma; |
vma | 152 | arch/sparc/kernel/ptrace.c | if (!(vma->vm_flags & VM_GROWSDOWN)) |
vma | 154 | arch/sparc/kernel/ptrace.c | if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur) |
vma | 156 | arch/sparc/kernel/ptrace.c | vma->vm_offset -= vma->vm_start - addr; |
vma | 157 | arch/sparc/kernel/ptrace.c | vma->vm_start = addr; |
vma | 158 | arch/sparc/kernel/ptrace.c | return vma; |
vma | 168 | arch/sparc/kernel/ptrace.c | struct vm_area_struct * vma = find_extend_vma(tsk, addr); |
vma | 170 | arch/sparc/kernel/ptrace.c | if (!vma) |
vma | 172 | arch/sparc/kernel/ptrace.c | *result = get_long(tsk, vma, addr); |
vma | 179 | arch/sparc/kernel/ptrace.c | struct vm_area_struct *vma = find_extend_vma(tsk, addr&~3); |
vma | 182 | arch/sparc/kernel/ptrace.c | if(!vma) |
vma | 184 | arch/sparc/kernel/ptrace.c | tmp = get_long(tsk, vma, (addr & ~3)); |
vma | 209 | arch/sparc/kernel/ptrace.c | struct vm_area_struct * vma = find_extend_vma(tsk, addr); |
vma | 211 | arch/sparc/kernel/ptrace.c | if (!vma) |
vma | 213 | arch/sparc/kernel/ptrace.c | put_long(tsk, vma, addr, data); |
vma | 220 | arch/sparc/kernel/ptrace.c | struct vm_area_struct * vma = find_extend_vma(tsk, (addr & ~3)); |
vma | 223 | arch/sparc/kernel/ptrace.c | if (!vma) |
vma | 225 | arch/sparc/kernel/ptrace.c | tmp = get_long(tsk, vma, (addr & ~3)); |
vma | 244 | arch/sparc/kernel/ptrace.c | put_long(tsk, vma, (addr & ~3), tmp); |
vma | 544 | arch/sparc/kernel/ptrace.c | struct vm_area_struct *vma; |
vma | 552 | arch/sparc/kernel/ptrace.c | vma = find_extend_vma(child, addr); |
vma | 553 | arch/sparc/kernel/ptrace.c | if(vma && request == PTRACE_POKEDATA && (vma->vm_flags & VM_EXEC)) { |
vma | 557 | arch/sparc/kernel/smp.c | void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page) |
vma | 558 | arch/sparc/kernel/smp.c | { xc2((smpfunc_t) local_flush_cache_page, (unsigned long) vma, page); } |
vma | 560 | arch/sparc/kernel/smp.c | void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) |
vma | 561 | arch/sparc/kernel/smp.c | { xc2((smpfunc_t) local_flush_tlb_page, (unsigned long) vma, page); } |
vma | 133 | arch/sparc/mm/fault.c | struct vm_area_struct *vma; |
vma | 163 | arch/sparc/mm/fault.c | vma = find_vma(current, address); |
vma | 164 | arch/sparc/mm/fault.c | if(!vma) |
vma | 166 | arch/sparc/mm/fault.c | if(vma->vm_start <= address) |
vma | 168 | arch/sparc/mm/fault.c | if(!(vma->vm_flags & VM_GROWSDOWN)) |
vma | 170 | arch/sparc/mm/fault.c | if(expand_stack(vma, address)) |
vma | 178 | arch/sparc/mm/fault.c | if(!(vma->vm_flags & VM_WRITE)) |
vma | 182 | arch/sparc/mm/fault.c | if(!(vma->vm_flags & (VM_READ | VM_EXEC))) |
vma | 185 | arch/sparc/mm/fault.c | handle_mm_fault(vma, address, write); |
vma | 217 | arch/sparc/mm/fault.c | struct vm_area_struct *vma; |
vma | 219 | arch/sparc/mm/fault.c | vma = find_vma(current, address); |
vma | 220 | arch/sparc/mm/fault.c | if(!vma) |
vma | 222 | arch/sparc/mm/fault.c | if(vma->vm_start <= address) |
vma | 224 | arch/sparc/mm/fault.c | if(!(vma->vm_flags & VM_GROWSDOWN)) |
vma | 226 | arch/sparc/mm/fault.c | if(expand_stack(vma, address)) |
vma | 230 | arch/sparc/mm/fault.c | if(!(vma->vm_flags & VM_WRITE)) |
vma | 233 | arch/sparc/mm/fault.c | if(!(vma->vm_flags & (VM_READ | VM_EXEC))) |
vma | 235 | arch/sparc/mm/fault.c | handle_mm_fault(vma, address, write); |
vma | 42 | arch/sparc/mm/loadmmu.c | void (*update_mmu_cache)(struct vm_area_struct *vma, unsigned long address, pte_t pte); |
vma | 585 | arch/sparc/mm/srmmu.c | static void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page) |
vma | 588 | arch/sparc/mm/srmmu.c | struct mm_struct *mm = vma->vm_mm; |
vma | 662 | arch/sparc/mm/srmmu.c | static void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) |
vma | 665 | arch/sparc/mm/srmmu.c | struct mm_struct *mm = vma->vm_mm; |
vma | 722 | arch/sparc/mm/srmmu.c | static void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page) |
vma | 725 | arch/sparc/mm/srmmu.c | struct mm_struct *mm = vma->vm_mm; |
vma | 729 | arch/sparc/mm/srmmu.c | if(vma->vm_flags & VM_EXEC) |
vma | 777 | arch/sparc/mm/srmmu.c | static void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) |
vma | 780 | arch/sparc/mm/srmmu.c | struct mm_struct *mm = vma->vm_mm; |
vma | 835 | arch/sparc/mm/srmmu.c | static void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page) |
vma | 838 | arch/sparc/mm/srmmu.c | struct mm_struct *mm = vma->vm_mm; |
vma | 984 | arch/sparc/mm/srmmu.c | static void viking_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) |
vma | 987 | arch/sparc/mm/srmmu.c | struct mm_struct *mm = vma->vm_mm; |
vma | 1053 | arch/sparc/mm/srmmu.c | static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) |
vma | 1056 | arch/sparc/mm/srmmu.c | struct mm_struct *mm = vma->vm_mm; |
vma | 1108 | arch/sparc/mm/srmmu.c | static void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page) |
vma | 1110 | arch/sparc/mm/srmmu.c | struct mm_struct *mm = vma->vm_mm; |
vma | 1209 | arch/sparc/mm/srmmu.c | static void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) |
vma | 1211 | arch/sparc/mm/srmmu.c | struct mm_struct *mm = vma->vm_mm; |
vma | 1935 | arch/sparc/mm/srmmu.c | static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte) |
vma | 568 | arch/sparc/mm/sun4c.c | static void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) |
vma | 1032 | arch/sparc/mm/sun4c.c | static void sun4c_flush_cache_page(struct vm_area_struct *vma, unsigned long page) |
vma | 1036 | arch/sparc/mm/sun4c.c | struct mm_struct *mm = vma->vm_mm; |
vma | 1142 | arch/sparc/mm/sun4c.c | static void sun4c_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) |
vma | 1144 | arch/sparc/mm/sun4c.c | struct mm_struct *mm = vma->vm_mm; |
vma | 189 | drivers/char/fbmem.c | fb_mmap(struct inode *inode, struct file *file, struct vm_area_struct * vma) |
vma | 197 | drivers/char/fbmem.c | if ((vma->vm_end - vma->vm_start + vma->vm_offset) > fix.smem_len) |
vma | 199 | drivers/char/fbmem.c | vma->vm_offset += fix.smem_start; |
vma | 200 | drivers/char/fbmem.c | if (vma->vm_offset & ~PAGE_MASK) |
vma | 203 | drivers/char/fbmem.c | pgprot_val(vma->vm_page_prot) &= _CACHEMASK040; |
vma | 204 | drivers/char/fbmem.c | if (remap_page_range(vma->vm_start, vma->vm_offset, |
vma | 205 | drivers/char/fbmem.c | vma->vm_end - vma->vm_start, vma->vm_page_prot)) |
vma | 207 | drivers/char/fbmem.c | vma->vm_inode = inode; |
vma | 99 | drivers/char/mem.c | static int mmap_mem(struct inode * inode, struct file * file, struct vm_area_struct * vma) |
vma | 101 | drivers/char/mem.c | if (vma->vm_offset & ~PAGE_MASK) |
vma | 110 | drivers/char/mem.c | if (x86 > 3 && vma->vm_offset >= high_memory) |
vma | 111 | drivers/char/mem.c | pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; |
vma | 113 | drivers/char/mem.c | if (remap_page_range(vma->vm_start, vma->vm_offset, vma->vm_end - vma->vm_start, vma->vm_page_prot)) |
vma | 115 | drivers/char/mem.c | vma->vm_inode = inode; |
vma | 183 | drivers/char/mem.c | static int mmap_zero(struct inode * inode, struct file * file, struct vm_area_struct * vma) |
vma | 185 | drivers/char/mem.c | if (vma->vm_flags & VM_SHARED) |
vma | 187 | drivers/char/mem.c | if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot)) |
vma | 670 | drivers/sbus/char/suncons.c | fb_mmap (struct inode *inode, struct file *file, struct vm_area_struct *vma) |
vma | 688 | drivers/sbus/char/suncons.c | v = (*fb->mmap)(inode, file, vma, fb->base, fb); |
vma | 829 | drivers/sbus/char/suncons.c | cg6_mmap (struct inode *inode, struct file *file, struct vm_area_struct *vma, long base, void *xx) |
vma | 835 | drivers/sbus/char/suncons.c | size = vma->vm_end - vma->vm_start; |
vma | 836 | drivers/sbus/char/suncons.c | if (vma->vm_offset & ~PAGE_MASK) |
vma | 840 | drivers/sbus/char/suncons.c | vma->vm_flags |= FB_MMAP_VM_FLAGS; |
vma | 844 | drivers/sbus/char/suncons.c | switch (vma->vm_offset+page){ |
vma | 889 | drivers/sbus/char/suncons.c | r = io_remap_page_range (vma->vm_start+page, |
vma | 891 | drivers/sbus/char/suncons.c | map_size, vma->vm_page_prot, |
vma | 896 | drivers/sbus/char/suncons.c | vma->vm_inode = inode; |
vma | 1082 | drivers/sbus/char/suncons.c | cg3_mmap (struct inode *inode, struct file *file, struct vm_area_struct *vma, long base, void *xx) |
vma | 1088 | drivers/sbus/char/suncons.c | size = vma->vm_end - vma->vm_start; |
vma | 1089 | drivers/sbus/char/suncons.c | if (vma->vm_offset & ~PAGE_MASK) |
vma | 1093 | drivers/sbus/char/suncons.c | vma->vm_flags |= FB_MMAP_VM_FLAGS; |
vma | 1097 | drivers/sbus/char/suncons.c | switch (vma->vm_offset+page){ |
vma | 1112 | drivers/sbus/char/suncons.c | r = io_remap_page_range (vma->vm_start+page, |
vma | 1114 | drivers/sbus/char/suncons.c | map_size, vma->vm_page_prot, |
vma | 1119 | drivers/sbus/char/suncons.c | vma->vm_inode = inode; |
vma | 1160 | drivers/sbus/char/suncons.c | bwtwo_mmap (struct inode *inode, struct file *file, struct vm_area_struct *vma, long base, void *xx) |
vma | 1166 | drivers/sbus/char/suncons.c | map_size = size = vma->vm_end - vma->vm_start; |
vma | 1168 | drivers/sbus/char/suncons.c | if (vma->vm_offset & ~PAGE_MASK) |
vma | 1172 | drivers/sbus/char/suncons.c | vma->vm_flags |= FB_MMAP_VM_FLAGS; |
vma | 1175 | drivers/sbus/char/suncons.c | (unsigned int) vma->vm_start, size, |
vma | 1176 | drivers/sbus/char/suncons.c | (unsigned int) vma->vm_offset); |
vma | 1180 | drivers/sbus/char/suncons.c | r = io_remap_page_range (vma->vm_start, map_offset, map_size, vma->vm_page_prot, |
vma | 1183 | drivers/sbus/char/suncons.c | vma->vm_inode = inode; |
vma | 234 | drivers/sound/soundcard.c | sound_mmap (inode_handle * inode, file_handle * file, vm_area_handle * vma) |
vma | 253 | drivers/sound/soundcard.c | if ((vma_get_flags (vma) & (VM_READ | VM_WRITE)) == (VM_READ | VM_WRITE)) |
vma | 259 | drivers/sound/soundcard.c | if (vma_get_flags (vma) & VM_READ) |
vma | 263 | drivers/sound/soundcard.c | else if (vma_get_flags (vma) & VM_WRITE) |
vma | 291 | drivers/sound/soundcard.c | if (vma_get_offset (vma) != 0) |
vma | 297 | drivers/sound/soundcard.c | size = vma_get_end (vma) - vma_get_start (vma); |
vma | 306 | drivers/sound/soundcard.c | if (remap_page_range (vma_get_start (vma), dmap->raw_buf_phys, |
vma | 307 | drivers/sound/soundcard.c | vma_get_end (vma) - vma_get_start (vma), |
vma | 308 | drivers/sound/soundcard.c | vma_get_page_prot (vma))) |
vma | 311 | drivers/sound/soundcard.c | vma_set_inode (vma, inode); |
vma | 842 | fs/binfmt_elf.c | static inline int maydump(struct vm_area_struct *vma) |
vma | 844 | fs/binfmt_elf.c | if (!(vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC))) |
vma | 847 | fs/binfmt_elf.c | if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN)) |
vma | 849 | fs/binfmt_elf.c | if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED)) |
vma | 945 | fs/binfmt_elf.c | struct vm_area_struct *vma; |
vma | 966 | fs/binfmt_elf.c | for(vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) { |
vma | 967 | fs/binfmt_elf.c | if (maydump(vma)) |
vma | 969 | fs/binfmt_elf.c | int sz = vma->vm_end-vma->vm_start; |
vma | 1151 | fs/binfmt_elf.c | for(vma = current->mm->mmap, i = 0; |
vma | 1152 | fs/binfmt_elf.c | i < segs && vma != NULL; vma = vma->vm_next) { |
vma | 1158 | fs/binfmt_elf.c | sz = vma->vm_end - vma->vm_start; |
vma | 1162 | fs/binfmt_elf.c | phdr.p_vaddr = vma->vm_start; |
vma | 1164 | fs/binfmt_elf.c | phdr.p_filesz = maydump(vma) ? sz : 0; |
vma | 1167 | fs/binfmt_elf.c | phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0; |
vma | 1168 | fs/binfmt_elf.c | if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W; |
vma | 1169 | fs/binfmt_elf.c | if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X; |
vma | 1183 | fs/binfmt_elf.c | for(i = 0, vma = current->mm->mmap; |
vma | 1184 | fs/binfmt_elf.c | i < segs && vma != NULL; |
vma | 1185 | fs/binfmt_elf.c | vma = vma->vm_next) { |
vma | 1186 | fs/binfmt_elf.c | unsigned long addr = vma->vm_start; |
vma | 1187 | fs/binfmt_elf.c | unsigned long len = vma->vm_end - vma->vm_start; |
vma | 1189 | fs/binfmt_elf.c | if (!maydump(vma)) |
vma | 94 | fs/fat/mmap.c | int fat_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma) |
vma | 96 | fs/fat/mmap.c | if (vma->vm_flags & VM_SHARED) /* only PAGE_COW or read-only supported now */ |
vma | 98 | fs/fat/mmap.c | if (vma->vm_offset & (inode->i_sb->s_blocksize - 1)) |
vma | 107 | fs/fat/mmap.c | vma->vm_inode = inode; |
vma | 109 | fs/fat/mmap.c | vma->vm_ops = &fat_file_mmap; |
vma | 253 | fs/locks.c | struct vm_area_struct *vma = inode->i_mmap; |
vma | 255 | fs/locks.c | if (vma->vm_flags & VM_MAYSHARE) |
vma | 257 | fs/locks.c | vma = vma->vm_next_share; |
vma | 258 | fs/locks.c | } while (vma != inode->i_mmap); |
vma | 31 | fs/namei.c | struct vm_area_struct * vma; |
vma | 35 | fs/namei.c | vma = find_vma(current, address); |
vma | 36 | fs/namei.c | if (!vma || vma->vm_start > address || !(vma->vm_flags & VM_READ)) |
vma | 38 | fs/namei.c | address = vma->vm_end - address; |
vma | 41 | fs/namei.c | if (vma->vm_next && vma->vm_next->vm_start == vma->vm_end && |
vma | 42 | fs/namei.c | (vma->vm_next->vm_flags & VM_READ)) |
vma | 132 | fs/ncpfs/mmap.c | ncp_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma) |
vma | 142 | fs/ncpfs/mmap.c | if (vma->vm_flags & VM_SHARED) |
vma | 151 | fs/ncpfs/mmap.c | vma->vm_inode = inode; |
vma | 153 | fs/ncpfs/mmap.c | vma->vm_ops = &ncp_file_mmap; |
vma | 94 | fs/nfs/file.c | static int nfs_file_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma) |
vma | 97 | fs/nfs/file.c | return generic_file_mmap(inode, file, vma); |
vma | 552 | fs/proc/array.c | struct vm_area_struct * vma = mm->mmap; |
vma | 556 | fs/proc/array.c | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
vma | 557 | fs/proc/array.c | unsigned long len = (vma->vm_end - vma->vm_start) >> 10; |
vma | 558 | fs/proc/array.c | if (!vma->vm_inode) { |
vma | 560 | fs/proc/array.c | if (vma->vm_flags & VM_GROWSDOWN) |
vma | 564 | fs/proc/array.c | if (vma->vm_flags & VM_WRITE) |
vma | 566 | fs/proc/array.c | if (vma->vm_flags & VM_EXEC) { |
vma | 568 | fs/proc/array.c | if (vma->vm_flags & VM_EXECUTABLE) |
vma | 655 | fs/proc/array.c | struct vm_area_struct *vma = tsk->mm->mmap; |
vma | 656 | fs/proc/array.c | while (vma) { |
vma | 657 | fs/proc/array.c | vsize += vma->vm_end - vma->vm_start; |
vma | 658 | fs/proc/array.c | vma = vma->vm_next; |
vma | 816 | fs/proc/array.c | struct vm_area_struct * vma = tsk->mm->mmap; |
vma | 818 | fs/proc/array.c | while (vma) { |
vma | 819 | fs/proc/array.c | pgd_t *pgd = pgd_offset(tsk->mm, vma->vm_start); |
vma | 822 | fs/proc/array.c | statm_pgd_range(pgd, vma->vm_start, vma->vm_end, &pages, &shared, &dirty, &total); |
vma | 827 | fs/proc/array.c | if (vma->vm_flags & VM_EXECUTABLE) |
vma | 829 | fs/proc/array.c | else if (vma->vm_flags & VM_GROWSDOWN) |
vma | 831 | fs/proc/array.c | else if (vma->vm_end > 0x60000000) |
vma | 835 | fs/proc/array.c | vma = vma->vm_next; |
vma | 105 | fs/proc/link.c | struct vm_area_struct * vma; |
vma | 108 | fs/proc/link.c | vma = p->mm->mmap; |
vma | 109 | fs/proc/link.c | while (vma) { |
vma | 110 | fs/proc/link.c | if (vma->vm_flags & VM_EXECUTABLE) { |
vma | 111 | fs/proc/link.c | new_inode = vma->vm_inode; |
vma | 114 | fs/proc/link.c | vma = vma->vm_next; |
vma | 28 | fs/proc/mem.c | struct vm_area_struct *vma; |
vma | 31 | fs/proc/mem.c | vma = find_vma(tsk, addr); |
vma | 32 | fs/proc/mem.c | if (!vma) |
vma | 34 | fs/proc/mem.c | if (vma->vm_start > addr) |
vma | 36 | fs/proc/mem.c | if (!(vma->vm_flags & VM_READ)) |
vma | 38 | fs/proc/mem.c | while ((retval = vma->vm_end - addr) < count) { |
vma | 39 | fs/proc/mem.c | struct vm_area_struct *next = vma->vm_next; |
vma | 42 | fs/proc/mem.c | if (vma->vm_end != next->vm_start) |
vma | 46 | fs/proc/mem.c | vma = next; |
vma | 194 | fs/proc/mem.c | struct vm_area_struct * vma) |
vma | 222 | fs/proc/mem.c | stmp = vma->vm_offset; |
vma | 223 | fs/proc/mem.c | while (stmp < vma->vm_offset + (vma->vm_end - vma->vm_start)) { |
vma | 257 | fs/proc/mem.c | stmp = vma->vm_offset; |
vma | 258 | fs/proc/mem.c | dtmp = vma->vm_start; |
vma | 260 | fs/proc/mem.c | flush_cache_range(vma->vm_mm, vma->vm_start, vma->vm_end); |
vma | 262 | fs/proc/mem.c | while (dtmp < vma->vm_end) { |
vma | 281 | fs/proc/mem.c | if ((vma->vm_flags & VM_WRITE) && !pte_write(*src_table)) |
vma | 292 | fs/proc/mem.c | flush_tlb_range(vma->vm_mm, vma->vm_start, vma->vm_end); |
vma | 104 | fs/smbfs/mmap.c | smb_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma) |
vma | 109 | fs/smbfs/mmap.c | if (vma->vm_flags & VM_SHARED) |
vma | 118 | fs/smbfs/mmap.c | vma->vm_inode = inode; |
vma | 120 | fs/smbfs/mmap.c | vma->vm_ops = &smb_file_mmap; |
vma | 790 | fs/super.c | struct vm_area_struct * vma; |
vma | 796 | fs/super.c | vma = find_vma(current, (unsigned long) data); |
vma | 797 | fs/super.c | if (!vma || (unsigned long) data < vma->vm_start) |
vma | 799 | fs/super.c | if (!(vma->vm_flags & VM_READ)) |
vma | 801 | fs/super.c | i = vma->vm_end - (unsigned long) data; |
vma | 19 | include/asm-alpha/pgtable.h | #define flush_cache_page(vma, vmaddr) do { } while (0) |
vma | 61 | include/asm-alpha/pgtable.h | struct vm_area_struct *vma, |
vma | 65 | include/asm-alpha/pgtable.h | tbi(2 + ((vma->vm_flags & VM_EXEC) != 0), addr); |
vma | 67 | include/asm-alpha/pgtable.h | if (vma->vm_flags & VM_EXEC) |
vma | 110 | include/asm-alpha/pgtable.h | static inline void flush_tlb_page(struct vm_area_struct *vma, |
vma | 113 | include/asm-alpha/pgtable.h | struct mm_struct * mm = vma->vm_mm; |
vma | 118 | include/asm-alpha/pgtable.h | flush_tlb_current_page(mm, vma, addr); |
vma | 500 | include/asm-alpha/pgtable.h | extern inline void update_mmu_cache(struct vm_area_struct * vma, |
vma | 26 | include/asm-i386/pgtable.h | #define flush_cache_page(vma, vmaddr) do { } while (0) |
vma | 63 | include/asm-i386/pgtable.h | static inline void flush_tlb_page(struct vm_area_struct *vma, |
vma | 66 | include/asm-i386/pgtable.h | if (vma->vm_mm == current->mm) |
vma | 124 | include/asm-i386/pgtable.h | static inline void flush_tlb_page(struct vm_area_struct * vma, |
vma | 127 | include/asm-i386/pgtable.h | if (vma->vm_mm == current->mm && current->mm->count == 1) |
vma | 152 | include/asm-i386/pgtable.h | static inline void flush_tlb_page(struct vm_area_struct *vma, |
vma | 475 | include/asm-i386/pgtable.h | extern inline void update_mmu_cache(struct vm_area_struct * vma, |
vma | 40 | include/asm-m68k/pgtable.h | static inline void flush_tlb_page(struct vm_area_struct *vma, |
vma | 43 | include/asm-m68k/pgtable.h | if (vma->vm_mm == current->mm) |
vma | 534 | include/asm-m68k/pgtable.h | #define flush_cache_page(vma, addr) flush_cache_all() |
vma | 569 | include/asm-m68k/pgtable.h | extern inline void update_mmu_cache(struct vm_area_struct * vma, |
vma | 551 | include/asm-mips/pgtable.h | extern void update_mmu_cache(struct vm_area_struct * vma, |
vma | 483 | include/asm-ppc/pgtable.h | extern inline void update_mmu_cache(struct vm_area_struct * vma, |
vma | 487 | include/asm-ppc/pgtable.h | printk("Update MMU cache - VMA: %x, Addr: %x, PTE: %x\n", vma, address, *(long *)&_pte); |
vma | 488 | include/asm-ppc/pgtable.h | _printk("Update MMU cache - VMA: %x, Addr: %x, PTE: %x\n", vma, address, *(long *)&_pte); |
vma | 275 | include/asm-sparc/pgtable.h | extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page); |
vma | 311 | include/asm-sparc/pgtable.h | extern void (*set_pte)(struct vm_area_struct *vma, unsigned long address, |
vma | 323 | include/asm-sparc/pgtable.h | extern void (*update_mmu_cache)(struct vm_area_struct *vma, unsigned long address, pte_t pte); |
vma | 262 | include/linux/mm.h | extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma); |
vma | 267 | include/linux/mm.h | extern void handle_mm_fault(struct vm_area_struct *vma, unsigned long address, int write_access); |
vma | 268 | include/linux/mm.h | extern void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access); |
vma | 269 | include/linux/mm.h | extern void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access); |
vma | 316 | include/linux/mm.h | static inline int expand_stack(struct vm_area_struct * vma, unsigned long address) |
vma | 321 | include/linux/mm.h | if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur) |
vma | 323 | include/linux/mm.h | grow = vma->vm_start - address; |
vma | 324 | include/linux/mm.h | vma->vm_start = address; |
vma | 325 | include/linux/mm.h | vma->vm_offset -= grow; |
vma | 326 | include/linux/mm.h | vma->vm_mm->total_vm += grow >> PAGE_SHIFT; |
vma | 327 | include/linux/mm.h | if (vma->vm_flags & VM_LOCKED) |
vma | 328 | include/linux/mm.h | vma->vm_mm->locked_vm += grow >> PAGE_SHIFT; |
vma | 360 | include/linux/mm.h | struct vm_area_struct * vma; |
vma | 362 | include/linux/mm.h | vma = find_vma(task,start_addr); |
vma | 363 | include/linux/mm.h | if (!vma || end_addr <= vma->vm_start) |
vma | 365 | include/linux/mm.h | return vma; |
vma | 161 | include/linux/ncp_fs.h | int ncp_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma); |
vma | 136 | include/linux/nfs_fs.h | extern int nfs_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma); |
vma | 177 | include/linux/smb_fs.h | int smb_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma); |
vma | 801 | mm/filemap.c | static int filemap_write_page(struct vm_area_struct * vma, |
vma | 821 | mm/filemap.c | inode = vma->vm_inode; |
vma | 849 | mm/filemap.c | int filemap_swapout(struct vm_area_struct * vma, |
vma | 857 | mm/filemap.c | flush_cache_page(vma, (offset + vma->vm_start - vma->vm_offset)); |
vma | 859 | mm/filemap.c | flush_tlb_page(vma, (offset + vma->vm_start - vma->vm_offset)); |
vma | 860 | mm/filemap.c | error = filemap_write_page(vma, offset, page); |
vma | 872 | mm/filemap.c | static pte_t filemap_swapin(struct vm_area_struct * vma, |
vma | 880 | mm/filemap.c | return mk_pte(page,vma->vm_page_prot); |
vma | 884 | mm/filemap.c | static inline int filemap_sync_pte(pte_t * ptep, struct vm_area_struct *vma, |
vma | 897 | mm/filemap.c | flush_cache_page(vma, address); |
vma | 899 | mm/filemap.c | flush_tlb_page(vma, address); |
vma | 905 | mm/filemap.c | flush_cache_page(vma, address); |
vma | 907 | mm/filemap.c | flush_tlb_page(vma, address); |
vma | 918 | mm/filemap.c | error = filemap_write_page(vma, address - vma->vm_start + vma->vm_offset, page); |
vma | 925 | mm/filemap.c | struct vm_area_struct *vma, unsigned long offset, unsigned int flags) |
vma | 946 | mm/filemap.c | error |= filemap_sync_pte(pte, vma, address + offset, flags); |
vma | 955 | mm/filemap.c | struct vm_area_struct *vma, unsigned int flags) |
vma | 976 | mm/filemap.c | error |= filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags); |
vma | 983 | mm/filemap.c | static int filemap_sync(struct vm_area_struct * vma, unsigned long address, |
vma | 990 | mm/filemap.c | dir = pgd_offset(vma->vm_mm, address); |
vma | 991 | mm/filemap.c | flush_cache_range(vma->vm_mm, end - size, end); |
vma | 993 | mm/filemap.c | error |= filemap_sync_pmd_range(dir, address, end - address, vma, flags); |
vma | 997 | mm/filemap.c | flush_tlb_range(vma->vm_mm, end - size, end); |
vma | 1004 | mm/filemap.c | static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len) |
vma | 1006 | mm/filemap.c | filemap_sync(vma, start, len, MS_ASYNC); |
vma | 1047 | mm/filemap.c | int generic_file_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma) |
vma | 1051 | mm/filemap.c | if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) { |
vma | 1055 | mm/filemap.c | if (vma->vm_offset & (PAGE_SIZE - 1)) |
vma | 1059 | mm/filemap.c | if (vma->vm_offset & (inode->i_sb->s_blocksize - 1)) |
vma | 1070 | mm/filemap.c | vma->vm_inode = inode; |
vma | 1072 | mm/filemap.c | vma->vm_ops = ops; |
vma | 1081 | mm/filemap.c | static int msync_interval(struct vm_area_struct * vma, |
vma | 1084 | mm/filemap.c | if (!vma->vm_inode) |
vma | 1086 | mm/filemap.c | if (vma->vm_ops->sync) { |
vma | 1088 | mm/filemap.c | error = vma->vm_ops->sync(vma, start, end-start, flags); |
vma | 1092 | mm/filemap.c | return file_fsync(vma->vm_inode, NULL); |
vma | 1101 | mm/filemap.c | struct vm_area_struct * vma; |
vma | 1118 | mm/filemap.c | vma = find_vma(current, start); |
vma | 1122 | mm/filemap.c | if (!vma) |
vma | 1125 | mm/filemap.c | if (start < vma->vm_start) { |
vma | 1127 | mm/filemap.c | start = vma->vm_start; |
vma | 1130 | mm/filemap.c | if (end <= vma->vm_end) { |
vma | 1132 | mm/filemap.c | error = msync_interval(vma, start, end, flags); |
vma | 1139 | mm/filemap.c | error = msync_interval(vma, start, vma->vm_end, flags); |
vma | 1142 | mm/filemap.c | start = vma->vm_end; |
vma | 1143 | mm/filemap.c | vma = vma->vm_next; |
vma | 277 | mm/memory.c | struct vm_area_struct *vma) |
vma | 280 | mm/memory.c | unsigned long address = vma->vm_start; |
vma | 281 | mm/memory.c | unsigned long end = vma->vm_end; |
vma | 284 | mm/memory.c | cow = (vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE; |
vma | 287 | mm/memory.c | flush_cache_range(src, vma->vm_start, vma->vm_end); |
vma | 288 | mm/memory.c | flush_cache_range(dst, vma->vm_start, vma->vm_end); |
vma | 296 | mm/memory.c | flush_tlb_range(src, vma->vm_start, vma->vm_end); |
vma | 297 | mm/memory.c | flush_tlb_range(dst, vma->vm_start, vma->vm_end); |
vma | 590 | mm/memory.c | void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, |
vma | 599 | mm/memory.c | page_dir = pgd_offset(vma->vm_mm, address); |
vma | 625 | mm/memory.c | ++vma->vm_mm->rss; |
vma | 629 | mm/memory.c | flush_cache_page(vma, address); |
vma | 630 | mm/memory.c | set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)))); |
vma | 632 | mm/memory.c | flush_tlb_page(vma, address); |
vma | 635 | mm/memory.c | flush_cache_page(vma, address); |
vma | 637 | mm/memory.c | flush_tlb_page(vma, address); |
vma | 642 | mm/memory.c | flush_cache_page(vma, address); |
vma | 644 | mm/memory.c | flush_tlb_page(vma, address); |
vma | 670 | mm/memory.c | struct vm_area_struct * vma; |
vma | 680 | mm/memory.c | vma = find_vma(current, start); |
vma | 681 | mm/memory.c | if (!vma) |
vma | 683 | mm/memory.c | if (vma->vm_start > start) |
vma | 691 | mm/memory.c | if (!(vma->vm_flags & VM_READ)) |
vma | 693 | mm/memory.c | if (vma->vm_end - start >= size) |
vma | 695 | mm/memory.c | next = vma->vm_next; |
vma | 696 | mm/memory.c | if (!next || vma->vm_end != next->vm_start) |
vma | 698 | mm/memory.c | vma = next; |
vma | 702 | mm/memory.c | if (!(vma->vm_flags & VM_WRITE)) |
vma | 707 | mm/memory.c | if (vma->vm_end - start >= size) |
vma | 709 | mm/memory.c | if (!vma->vm_next || vma->vm_end != vma->vm_next->vm_start) |
vma | 711 | mm/memory.c | vma = vma->vm_next; |
vma | 712 | mm/memory.c | if (!(vma->vm_flags & VM_WRITE)) |
vma | 724 | mm/memory.c | do_wp_page(current, vma, start, 1); |
vma | 729 | mm/memory.c | if (start < vma->vm_end) |
vma | 731 | mm/memory.c | vma = vma->vm_next; |
vma | 732 | mm/memory.c | if (!vma || vma->vm_start != start) |
vma | 734 | mm/memory.c | if (!(vma->vm_flags & VM_WRITE)) |
vma | 740 | mm/memory.c | if (!(vma->vm_flags & VM_GROWSDOWN)) |
vma | 742 | mm/memory.c | if (expand_stack(vma, start) == 0) |
vma | 749 | mm/memory.c | static inline void get_empty_page(struct task_struct * tsk, struct vm_area_struct * vma, |
vma | 754 | mm/memory.c | pte = pte_wrprotect(mk_pte(ZERO_PAGE, vma->vm_page_prot)); |
vma | 757 | mm/memory.c | pte = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); |
vma | 758 | mm/memory.c | vma->vm_mm->rss++; |
vma | 772 | mm/memory.c | static void partial_clear(struct vm_area_struct *vma, unsigned long address) |
vma | 778 | mm/memory.c | page_dir = pgd_offset(vma->vm_mm, address); |
vma | 798 | mm/memory.c | flush_cache_page(vma, address); |
vma | 872 | mm/memory.c | struct vm_area_struct * vma, unsigned long address, |
vma | 877 | mm/memory.c | if (!vma->vm_ops || !vma->vm_ops->swapin) { |
vma | 878 | mm/memory.c | swap_in(tsk, vma, page_table, pte_val(entry), write_access); |
vma | 882 | mm/memory.c | page = vma->vm_ops->swapin(vma, address - vma->vm_start + vma->vm_offset, pte_val(entry)); |
vma | 887 | mm/memory.c | if (mem_map[MAP_NR(pte_page(page))].count > 1 && !(vma->vm_flags & VM_SHARED)) |
vma | 889 | mm/memory.c | ++vma->vm_mm->rss; |
vma | 902 | mm/memory.c | void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, |
vma | 916 | mm/memory.c | do_swap_page(tsk, vma, address, page_table, entry, write_access); |
vma | 920 | mm/memory.c | if (!vma->vm_ops || !vma->vm_ops->nopage) { |
vma | 921 | mm/memory.c | flush_cache_page(vma, address); |
vma | 922 | mm/memory.c | get_empty_page(tsk, vma, page_table, write_access); |
vma | 926 | mm/memory.c | ++vma->vm_mm->rss; |
vma | 932 | mm/memory.c | page = vma->vm_ops->nopage(vma, address, write_access && !(vma->vm_flags & VM_SHARED)); |
vma | 935 | mm/memory.c | flush_cache_page(vma, address); |
vma | 937 | mm/memory.c | flush_tlb_page(vma, address); |
vma | 951 | mm/memory.c | entry = mk_pte(page, vma->vm_page_prot); |
vma | 954 | mm/memory.c | } else if (mem_map[MAP_NR(page)].count > 1 && !(vma->vm_flags & VM_SHARED)) |
vma | 956 | mm/memory.c | flush_cache_page(vma, address); |
vma | 958 | mm/memory.c | flush_tlb_page(vma, address); |
vma | 974 | mm/memory.c | static inline void handle_pte_fault(struct vm_area_struct * vma, unsigned long address, |
vma | 978 | mm/memory.c | do_no_page(current, vma, address, write_access); |
vma | 982 | mm/memory.c | flush_tlb_page(vma, address); |
vma | 987 | mm/memory.c | flush_tlb_page(vma, address); |
vma | 990 | mm/memory.c | do_wp_page(current, vma, address, write_access); |
vma | 993 | mm/memory.c | void handle_mm_fault(struct vm_area_struct * vma, unsigned long address, |
vma | 1000 | mm/memory.c | pgd = pgd_offset(vma->vm_mm, address); |
vma | 1007 | mm/memory.c | handle_pte_fault(vma, address, write_access, pte); |
vma | 1008 | mm/memory.c | update_mmu_cache(vma, address, *pte); |
vma | 20 | mm/mlock.c | static inline int mlock_fixup_all(struct vm_area_struct * vma, int newflags) |
vma | 22 | mm/mlock.c | vma->vm_flags = newflags; |
vma | 26 | mm/mlock.c | static inline int mlock_fixup_start(struct vm_area_struct * vma, |
vma | 34 | mm/mlock.c | *n = *vma; |
vma | 35 | mm/mlock.c | vma->vm_start = end; |
vma | 37 | mm/mlock.c | vma->vm_offset += vma->vm_start - n->vm_start; |
vma | 47 | mm/mlock.c | static inline int mlock_fixup_end(struct vm_area_struct * vma, |
vma | 55 | mm/mlock.c | *n = *vma; |
vma | 56 | mm/mlock.c | vma->vm_end = start; |
vma | 58 | mm/mlock.c | n->vm_offset += n->vm_start - vma->vm_start; |
vma | 68 | mm/mlock.c | static inline int mlock_fixup_middle(struct vm_area_struct * vma, |
vma | 81 | mm/mlock.c | *left = *vma; |
vma | 82 | mm/mlock.c | *right = *vma; |
vma | 84 | mm/mlock.c | vma->vm_start = start; |
vma | 85 | mm/mlock.c | vma->vm_end = end; |
vma | 87 | mm/mlock.c | vma->vm_offset += vma->vm_start - left->vm_start; |
vma | 89 | mm/mlock.c | vma->vm_flags = newflags; |
vma | 90 | mm/mlock.c | if (vma->vm_inode) |
vma | 91 | mm/mlock.c | vma->vm_inode->i_count += 2; |
vma | 92 | mm/mlock.c | if (vma->vm_ops && vma->vm_ops->open) { |
vma | 93 | mm/mlock.c | vma->vm_ops->open(left); |
vma | 94 | mm/mlock.c | vma->vm_ops->open(right); |
vma | 101 | mm/mlock.c | static int mlock_fixup(struct vm_area_struct * vma, |
vma | 106 | mm/mlock.c | if (newflags == vma->vm_flags) |
vma | 109 | mm/mlock.c | if (start == vma->vm_start) { |
vma | 110 | mm/mlock.c | if (end == vma->vm_end) |
vma | 111 | mm/mlock.c | retval = mlock_fixup_all(vma, newflags); |
vma | 113 | mm/mlock.c | retval = mlock_fixup_start(vma, end, newflags); |
vma | 115 | mm/mlock.c | if (end == vma->vm_end) |
vma | 116 | mm/mlock.c | retval = mlock_fixup_end(vma, start, newflags); |
vma | 118 | mm/mlock.c | retval = mlock_fixup_middle(vma, start, end, newflags); |
vma | 125 | mm/mlock.c | vma->vm_mm->locked_vm += pages; |
vma | 140 | mm/mlock.c | struct vm_area_struct * vma, * next; |
vma | 151 | mm/mlock.c | vma = find_vma(current, start); |
vma | 152 | mm/mlock.c | if (!vma || vma->vm_start > start) |
vma | 160 | mm/mlock.c | newflags = vma->vm_flags | VM_LOCKED; |
vma | 164 | mm/mlock.c | if (vma->vm_end >= end) { |
vma | 165 | mm/mlock.c | error = mlock_fixup(vma, nstart, end, newflags); |
vma | 169 | mm/mlock.c | tmp = vma->vm_end; |
vma | 170 | mm/mlock.c | next = vma->vm_next; |
vma | 171 | mm/mlock.c | error = mlock_fixup(vma, nstart, tmp, newflags); |
vma | 175 | mm/mlock.c | vma = next; |
vma | 176 | mm/mlock.c | if (!vma || vma->vm_start != nstart) { |
vma | 222 | mm/mlock.c | struct vm_area_struct * vma; |
vma | 233 | mm/mlock.c | for (vma = current->mm->mmap; vma ; vma = vma->vm_next) { |
vma | 236 | mm/mlock.c | newflags = vma->vm_flags | VM_LOCKED; |
vma | 239 | mm/mlock.c | error = mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags); |
vma | 45 | mm/mmap.c | struct vm_area_struct * vma; |
vma | 121 | mm/mmap.c | vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct), |
vma | 123 | mm/mmap.c | if (!vma) |
vma | 126 | mm/mmap.c | vma->vm_mm = current->mm; |
vma | 127 | mm/mmap.c | vma->vm_start = addr; |
vma | 128 | mm/mmap.c | vma->vm_end = addr + len; |
vma | 129 | mm/mmap.c | vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC); |
vma | 130 | mm/mmap.c | vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE); |
vma | 131 | mm/mmap.c | vma->vm_flags |= current->mm->def_flags; |
vma | 135 | mm/mmap.c | vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; |
vma | 137 | mm/mmap.c | vma->vm_flags |= VM_SHARED | VM_MAYSHARE; |
vma | 149 | mm/mmap.c | vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED); |
vma | 152 | mm/mmap.c | vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; |
vma | 153 | mm/mmap.c | vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f]; |
vma | 154 | mm/mmap.c | vma->vm_ops = NULL; |
vma | 155 | mm/mmap.c | vma->vm_offset = off; |
vma | 156 | mm/mmap.c | vma->vm_inode = NULL; |
vma | 157 | mm/mmap.c | vma->vm_pte = 0; |
vma | 162 | mm/mmap.c | int error = file->f_op->mmap(file->f_inode, file, vma); |
vma | 165 | mm/mmap.c | kfree(vma); |
vma | 170 | mm/mmap.c | flags = vma->vm_flags; |
vma | 171 | mm/mmap.c | insert_vm_struct(current, vma); |
vma | 172 | mm/mmap.c | merge_segments(current, vma->vm_start, vma->vm_end); |
vma | 481 | mm/mmap.c | static void printk_list (struct vm_area_struct * vma) |
vma | 484 | mm/mmap.c | while (vma) { |
vma | 485 | mm/mmap.c | printk("%08lX-%08lX", vma->vm_start, vma->vm_end); |
vma | 486 | mm/mmap.c | vma = vma->vm_next; |
vma | 487 | mm/mmap.c | if (!vma) |
vma | 749 | mm/mmap.c | struct vm_area_struct * vma; |
vma | 752 | mm/mmap.c | for (vma = mm->mmap; vma; vma = vma->vm_next) |
vma | 753 | mm/mmap.c | avl_insert(vma, &mm->mmap_avl); |
vma | 88 | mm/mprotect.c | static inline int mprotect_fixup_all(struct vm_area_struct * vma, |
vma | 91 | mm/mprotect.c | vma->vm_flags = newflags; |
vma | 92 | mm/mprotect.c | vma->vm_page_prot = prot; |
vma | 96 | mm/mprotect.c | static inline int mprotect_fixup_start(struct vm_area_struct * vma, |
vma | 105 | mm/mprotect.c | *n = *vma; |
vma | 106 | mm/mprotect.c | vma->vm_start = end; |
vma | 108 | mm/mprotect.c | vma->vm_offset += vma->vm_start - n->vm_start; |
vma | 119 | mm/mprotect.c | static inline int mprotect_fixup_end(struct vm_area_struct * vma, |
vma | 128 | mm/mprotect.c | *n = *vma; |
vma | 129 | mm/mprotect.c | vma->vm_end = start; |
vma | 131 | mm/mprotect.c | n->vm_offset += n->vm_start - vma->vm_start; |
vma | 142 | mm/mprotect.c | static inline int mprotect_fixup_middle(struct vm_area_struct * vma, |
vma | 156 | mm/mprotect.c | *left = *vma; |
vma | 157 | mm/mprotect.c | *right = *vma; |
vma | 159 | mm/mprotect.c | vma->vm_start = start; |
vma | 160 | mm/mprotect.c | vma->vm_end = end; |
vma | 162 | mm/mprotect.c | vma->vm_offset += vma->vm_start - left->vm_start; |
vma | 164 | mm/mprotect.c | vma->vm_flags = newflags; |
vma | 165 | mm/mprotect.c | vma->vm_page_prot = prot; |
vma | 166 | mm/mprotect.c | if (vma->vm_inode) |
vma | 167 | mm/mprotect.c | vma->vm_inode->i_count += 2; |
vma | 168 | mm/mprotect.c | if (vma->vm_ops && vma->vm_ops->open) { |
vma | 169 | mm/mprotect.c | vma->vm_ops->open(left); |
vma | 170 | mm/mprotect.c | vma->vm_ops->open(right); |
vma | 177 | mm/mprotect.c | static int mprotect_fixup(struct vm_area_struct * vma, |
vma | 183 | mm/mprotect.c | if (newflags == vma->vm_flags) |
vma | 186 | mm/mprotect.c | if (start == vma->vm_start) |
vma | 187 | mm/mprotect.c | if (end == vma->vm_end) |
vma | 188 | mm/mprotect.c | error = mprotect_fixup_all(vma, newflags, newprot); |
vma | 190 | mm/mprotect.c | error = mprotect_fixup_start(vma, end, newflags, newprot); |
vma | 191 | mm/mprotect.c | else if (end == vma->vm_end) |
vma | 192 | mm/mprotect.c | error = mprotect_fixup_end(vma, start, newflags, newprot); |
vma | 194 | mm/mprotect.c | error = mprotect_fixup_middle(vma, start, end, newflags, newprot); |
vma | 206 | mm/mprotect.c | struct vm_area_struct * vma, * next; |
vma | 219 | mm/mprotect.c | vma = find_vma(current, start); |
vma | 220 | mm/mprotect.c | if (!vma || vma->vm_start > start) |
vma | 228 | mm/mprotect.c | newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC)); |
vma | 234 | mm/mprotect.c | if (vma->vm_end >= end) { |
vma | 235 | mm/mprotect.c | error = mprotect_fixup(vma, nstart, end, newflags); |
vma | 239 | mm/mprotect.c | tmp = vma->vm_end; |
vma | 240 | mm/mprotect.c | next = vma->vm_next; |
vma | 241 | mm/mprotect.c | error = mprotect_fixup(vma, nstart, tmp, newflags); |
vma | 245 | mm/mprotect.c | vma = next; |
vma | 246 | mm/mprotect.c | if (!vma || vma->vm_start != nstart) { |
vma | 127 | mm/mremap.c | static inline unsigned long move_vma(struct vm_area_struct * vma, |
vma | 138 | mm/mremap.c | *new_vma = *vma; |
vma | 141 | mm/mremap.c | new_vma->vm_offset = vma->vm_offset + (addr - vma->vm_start); |
vma | 164 | mm/mremap.c | struct vm_area_struct *vma; |
vma | 185 | mm/mremap.c | vma = find_vma(current, addr); |
vma | 186 | mm/mremap.c | if (!vma || vma->vm_start > addr) |
vma | 189 | mm/mremap.c | if (old_len > vma->vm_end - addr) |
vma | 191 | mm/mremap.c | if (vma->vm_flags & VM_LOCKED) { |
vma | 199 | mm/mremap.c | if (old_len == vma->vm_end - addr) { |
vma | 201 | mm/mremap.c | if (vma->vm_next) |
vma | 202 | mm/mremap.c | max_addr = vma->vm_next->vm_start; |
vma | 206 | mm/mremap.c | vma->vm_end = addr + new_len; |
vma | 208 | mm/mremap.c | if (vma->vm_flags & VM_LOCKED) |
vma | 219 | mm/mremap.c | return move_vma(vma, addr, old_len, new_len); |
vma | 291 | mm/page_alloc.c | void swap_in(struct task_struct * tsk, struct vm_area_struct * vma, |
vma | 311 | mm/page_alloc.c | vma->vm_mm->rss++; |
vma | 315 | mm/page_alloc.c | set_pte(page_table, mk_pte(page, vma->vm_page_prot)); |
vma | 318 | mm/page_alloc.c | set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)))); |
vma | 163 | mm/swapfile.c | static inline int unuse_pte(struct vm_area_struct * vma, unsigned long address, |
vma | 189 | mm/swapfile.c | set_pte(dir, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)))); |
vma | 190 | mm/swapfile.c | ++vma->vm_mm->rss; |
vma | 195 | mm/swapfile.c | static inline int unuse_pmd(struct vm_area_struct * vma, pmd_t *dir, |
vma | 216 | mm/swapfile.c | if (unuse_pte(vma, offset+address-vma->vm_start, pte, type, page)) |
vma | 224 | mm/swapfile.c | static inline int unuse_pgd(struct vm_area_struct * vma, pgd_t *dir, |
vma | 245 | mm/swapfile.c | if (unuse_pmd(vma, pmd, address, end - address, offset, type, page)) |
vma | 253 | mm/swapfile.c | static int unuse_vma(struct vm_area_struct * vma, pgd_t *pgdir, |
vma | 258 | mm/swapfile.c | if (unuse_pgd(vma, pgdir, start, end - start, type, page)) |
vma | 268 | mm/swapfile.c | struct vm_area_struct* vma; |
vma | 275 | mm/swapfile.c | vma = mm->mmap; |
vma | 276 | mm/swapfile.c | while (vma) { |
vma | 277 | mm/swapfile.c | pgd_t * pgd = pgd_offset(mm, vma->vm_start); |
vma | 278 | mm/swapfile.c | if (unuse_vma(vma, pgd, vma->vm_start, vma->vm_end, type, page)) |
vma | 280 | mm/swapfile.c | vma = vma->vm_next; |
vma | 70 | mm/vmscan.c | static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struct* vma, |
vma | 103 | mm/vmscan.c | if (vma->vm_ops && vma->vm_ops->swapout) { |
vma | 105 | mm/vmscan.c | vma->vm_mm->rss--; |
vma | 106 | mm/vmscan.c | if (vma->vm_ops->swapout(vma, address - vma->vm_start + vma->vm_offset, page_table)) |
vma | 113 | mm/vmscan.c | vma->vm_mm->rss--; |
vma | 114 | mm/vmscan.c | flush_cache_page(vma, address); |
vma | 116 | mm/vmscan.c | flush_tlb_page(vma, address); |
vma | 129 | mm/vmscan.c | vma->vm_mm->rss--; |
vma | 130 | mm/vmscan.c | flush_cache_page(vma, address); |
vma | 132 | mm/vmscan.c | flush_tlb_page(vma, address); |
vma | 136 | mm/vmscan.c | vma->vm_mm->rss--; |
vma | 137 | mm/vmscan.c | flush_cache_page(vma, address); |
vma | 139 | mm/vmscan.c | flush_tlb_page(vma, address); |
vma | 159 | mm/vmscan.c | static inline int swap_out_pmd(struct task_struct * tsk, struct vm_area_struct * vma, |
vma | 182 | mm/vmscan.c | result = try_to_swap_out(tsk, vma, address, pte, dma, wait); |
vma | 191 | mm/vmscan.c | static inline int swap_out_pgd(struct task_struct * tsk, struct vm_area_struct * vma, |
vma | 212 | mm/vmscan.c | int result = swap_out_pmd(tsk, vma, pmd, address, end, dma, wait); |
vma | 221 | mm/vmscan.c | static int swap_out_vma(struct task_struct * tsk, struct vm_area_struct * vma, |
vma | 228 | mm/vmscan.c | if (vma->vm_flags & (VM_SHM | VM_LOCKED)) |
vma | 231 | mm/vmscan.c | end = vma->vm_end; |
vma | 233 | mm/vmscan.c | int result = swap_out_pgd(tsk, vma, pgdir, start, end, dma, wait); |
vma | 245 | mm/vmscan.c | struct vm_area_struct* vma; |
vma | 256 | mm/vmscan.c | vma = find_vma(p, address); |
vma | 257 | mm/vmscan.c | if (!vma) |
vma | 259 | mm/vmscan.c | if (address < vma->vm_start) |
vma | 260 | mm/vmscan.c | address = vma->vm_start; |
vma | 263 | mm/vmscan.c | int result = swap_out_vma(p, vma, pgd_offset(p->mm, address), address, dma, wait); |
vma | 266 | mm/vmscan.c | vma = vma->vm_next; |
vma | 267 | mm/vmscan.c | if (!vma) |
vma | 269 | mm/vmscan.c | address = vma->vm_start; |
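
Two idioms account for most of the rows above, and a compact model of each may help when scanning the listing. The first is the stack-extension check that every architecture open-codes in its own `find_extend_vma()` (each arch's ptrace.c) and that the fault handlers reach through `expand_stack()`: look the address up with `find_vma()`, succeed if it already falls inside the VMA, and otherwise grow a `VM_GROWSDOWN` mapping downward, bounded by `RLIMIT_STACK`. The stand-alone C model below sketches that control flow; the struct layout, the `VM_GROWSDOWN` value, and the explicit `stack_rlim` parameter are simplified stand-ins for the kernel's real definitions, not the real API.

```c
/*
 * Minimal user-space model of the find_vma()/find_extend_vma() pattern
 * repeated per arch in the listing above. A sketch of the control flow
 * only; types and limit handling are simplified stand-ins.
 */
#include <stdio.h>
#include <stddef.h>

#define VM_GROWSDOWN 0x0100    /* stand-in flag value */

struct vm_area_struct {
    unsigned long vm_start, vm_end, vm_offset;
    unsigned long vm_flags;
    struct vm_area_struct *vm_next;   /* VMAs kept in a sorted list */
};

/* First VMA whose vm_end lies above addr, as find_vma() returns. */
static struct vm_area_struct *find_vma(struct vm_area_struct *mmap,
                                       unsigned long addr)
{
    struct vm_area_struct *vma;
    for (vma = mmap; vma; vma = vma->vm_next)
        if (addr < vma->vm_end)
            return vma;
    return NULL;
}

/* The check-and-grow sequence duplicated per arch in the listing. */
static struct vm_area_struct *find_extend_vma(struct vm_area_struct *mmap,
                                              unsigned long addr,
                                              unsigned long stack_rlim)
{
    struct vm_area_struct *vma = find_vma(mmap, addr);
    if (!vma)
        return NULL;
    if (vma->vm_start <= addr)            /* addr already inside the VMA */
        return vma;
    if (!(vma->vm_flags & VM_GROWSDOWN))  /* a hole, not a growable stack */
        return NULL;
    if (vma->vm_end - addr > stack_rlim)  /* would exceed RLIMIT_STACK */
        return NULL;
    vma->vm_offset -= vma->vm_start - addr;  /* keep offset in sync */
    vma->vm_start = addr;                    /* grow the stack downward */
    return vma;
}

int main(void)
{
    struct vm_area_struct stack = {
        .vm_start = 0x7f0000, .vm_end = 0x800000,
        .vm_offset = 0, .vm_flags = VM_GROWSDOWN, .vm_next = NULL,
    };
    struct vm_area_struct *vma = find_extend_vma(&stack, 0x7e0000, 0x100000);
    if (vma)
        printf("stack grown: start=%#lx end=%#lx\n",
               vma->vm_start, vma->vm_end);
    return 0;
}
```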
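The second idiom is the unaligned word access in `read_long()`/`write_long()`: ptrace works on whole words, and when `addr + sizeof(long)` crosses `vma->vm_end` the code first verifies that `vma->vm_next` starts exactly at `vm_end`, then fetches the two aligned words containing the value and splices them (the m68k variant, being big-endian, tracks a `vma_low` and combines the high word first). Below is a minimal little-endian model under the assumption of a flat byte array standing in for the page-table walk that the real `get_long()` performs; `read_long_model()` is a hypothetical name, not a kernel function.

```c
/*
 * Sketch of the unaligned-read splice done by read_long() in the
 * i386/m68k/mips ptrace.c entries above. Memory is modelled as a flat
 * byte array; get_long() here stands in for the pgd/pmd/pte walk (with
 * do_no_page() retries) that the real code performs.
 */
#include <stdio.h>
#include <string.h>

/* Stand-in for get_long(): fetch the aligned word containing addr. */
static unsigned long get_long(const unsigned char *mem, unsigned long addr)
{
    unsigned long word;
    memcpy(&word, mem + (addr & ~(sizeof(long) - 1)), sizeof(long));
    return word;
}

/* Splice an unaligned long out of two aligned reads (little-endian). */
static unsigned long read_long_model(const unsigned char *mem,
                                     unsigned long addr)
{
    unsigned long shift = 8 * (addr & (sizeof(long) - 1));
    unsigned long low = get_long(mem, addr);
    if (shift == 0)
        return low;                       /* aligned: one word suffices */
    /* second word may live in the next page/VMA in the real code */
    unsigned long high = get_long(mem, addr + sizeof(long));
    return (low >> shift) | (high << (8 * sizeof(long) - shift));
}

int main(void)
{
    unsigned char mem[32];
    for (int i = 0; i < 32; i++)
        mem[i] = (unsigned char)i;
    /* bytes 3..10 spliced out of the words at offsets 0 and 8 */
    printf("%#lx\n", read_long_model(mem, 3));
    return 0;
}
```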