tag | line | file | source code |
shmd | 24 | ipc/shm.c | static int shm_map (struct vm_area_struct *shmd); |
shmd | 26 | ipc/shm.c | static void shm_open (struct vm_area_struct *shmd); |
shmd | 27 | ipc/shm.c | static void shm_close (struct vm_area_struct *shmd); |
shmd | 377 | ipc/shm.c | static inline void insert_attach (struct shmid_ds * shp, struct vm_area_struct * shmd) |
shmd | 382 | ipc/shm.c | shmd->vm_next_share = attaches; |
shmd | 383 | ipc/shm.c | shmd->vm_prev_share = attaches->vm_prev_share; |
shmd | 384 | ipc/shm.c | shmd->vm_prev_share->vm_next_share = shmd; |
shmd | 385 | ipc/shm.c | attaches->vm_prev_share = shmd; |
shmd | 387 | ipc/shm.c | shp->attaches = shmd->vm_next_share = shmd->vm_prev_share = shmd; |
shmd | 391 | ipc/shm.c | static inline void remove_attach (struct shmid_ds * shp, struct vm_area_struct * shmd) |
shmd | 393 | ipc/shm.c | if (shmd->vm_next_share == shmd) { |
shmd | 394 | ipc/shm.c | if (shp->attaches != shmd) { |
shmd | 396 | ipc/shm.c | SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK); |
shmd | 398 | ipc/shm.c | shmd->vm_start, shmd->vm_end, |
shmd | 399 | ipc/shm.c | shmd->vm_flags & VM_READ ? 'r' : '-', |
shmd | 400 | ipc/shm.c | shmd->vm_flags & VM_WRITE ? 'w' : '-', |
shmd | 401 | ipc/shm.c | shmd->vm_flags & VM_EXEC ? 'x' : '-', |
shmd | 402 | ipc/shm.c | shmd->vm_flags & VM_MAYSHARE ? 's' : 'p', |
shmd | 403 | ipc/shm.c | shmd->vm_offset, shmd->vm_pte); |
shmd | 407 | ipc/shm.c | if (shp->attaches == shmd) |
shmd | 408 | ipc/shm.c | shp->attaches = shmd->vm_next_share; |
shmd | 409 | ipc/shm.c | shmd->vm_prev_share->vm_next_share = shmd->vm_next_share; |
shmd | 410 | ipc/shm.c | shmd->vm_next_share->vm_prev_share = shmd->vm_prev_share; |
shmd | 418 | ipc/shm.c | static int shm_map (struct vm_area_struct *shmd) |
shmd | 426 | ipc/shm.c | do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start); |
shmd | 429 | ipc/shm.c | current->mm->total_vm += (shmd->vm_end - shmd->vm_start) >> PAGE_SHIFT; |
shmd | 430 | ipc/shm.c | insert_vm_struct(current, shmd); |
shmd | 431 | ipc/shm.c | merge_segments(current, shmd->vm_start, shmd->vm_end); |
shmd | 434 | ipc/shm.c | shm_sgn = shmd->vm_pte + |
shmd | 435 | ipc/shm.c | SWP_ENTRY(0, (shmd->vm_offset >> PAGE_SHIFT) << SHM_IDX_SHIFT); |
shmd | 436 | ipc/shm.c | flush_cache_range(shmd->vm_mm, shmd->vm_start, shmd->vm_end); |
shmd | 437 | ipc/shm.c | for (tmp = shmd->vm_start; |
shmd | 438 | ipc/shm.c | tmp < shmd->vm_end; |
shmd | 441 | ipc/shm.c | page_dir = pgd_offset(shmd->vm_mm,tmp); |
shmd | 450 | ipc/shm.c | flush_tlb_range(shmd->vm_mm, shmd->vm_start, shmd->vm_end); |
shmd | 460 | ipc/shm.c | struct vm_area_struct *shmd; |
shmd | 498 | ipc/shm.c | if ((shmd = find_vma_intersection(current, addr, addr + shp->shm_segsz))) { |
shmd | 509 | ipc/shm.c | shmd = (struct vm_area_struct *) kmalloc (sizeof(*shmd), GFP_KERNEL); |
shmd | 510 | ipc/shm.c | if (!shmd) |
shmd | 513 | ipc/shm.c | kfree(shmd); |
shmd | 517 | ipc/shm.c | shmd->vm_pte = SWP_ENTRY(SHM_SWP_TYPE, id); |
shmd | 518 | ipc/shm.c | shmd->vm_start = addr; |
shmd | 519 | ipc/shm.c | shmd->vm_end = addr + shp->shm_npages * PAGE_SIZE; |
shmd | 520 | ipc/shm.c | shmd->vm_mm = current->mm; |
shmd | 521 | ipc/shm.c | shmd->vm_page_prot = (shmflg & SHM_RDONLY) ? PAGE_READONLY : PAGE_SHARED; |
shmd | 522 | ipc/shm.c | shmd->vm_flags = VM_SHM | VM_MAYSHARE | VM_SHARED |
shmd | 525 | ipc/shm.c | shmd->vm_next_share = shmd->vm_prev_share = NULL; |
shmd | 526 | ipc/shm.c | shmd->vm_inode = NULL; |
shmd | 527 | ipc/shm.c | shmd->vm_offset = 0; |
shmd | 528 | ipc/shm.c | shmd->vm_ops = &shm_vm_ops; |
shmd | 531 | ipc/shm.c | if ((err = shm_map (shmd))) { |
shmd | 534 | ipc/shm.c | kfree(shmd); |
shmd | 538 | ipc/shm.c | insert_attach(shp,shmd); /* insert shmd into shp->attaches */ |
shmd | 548 | ipc/shm.c | static void shm_open (struct vm_area_struct *shmd) |
shmd | 553 | ipc/shm.c | id = SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK; |
shmd | 559 | ipc/shm.c | insert_attach(shp,shmd); /* insert shmd into shp->attaches */ |
shmd | 571 | ipc/shm.c | static void shm_close (struct vm_area_struct *shmd) |
shmd | 577 | ipc/shm.c | id = SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK; |
shmd | 579 | ipc/shm.c | remove_attach(shp,shmd); /* remove from shp->attaches */ |
shmd | 592 | ipc/shm.c | struct vm_area_struct *shmd, *shmdnext; |
shmd | 594 | ipc/shm.c | for (shmd = current->mm->mmap; shmd; shmd = shmdnext) { |
shmd | 595 | ipc/shm.c | shmdnext = shmd->vm_next; |
shmd | 596 | ipc/shm.c | if (shmd->vm_ops == &shm_vm_ops |
shmd | 597 | ipc/shm.c | && shmd->vm_start - shmd->vm_offset == (ulong) shmaddr) |
shmd | 598 | ipc/shm.c | do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start); |
shmd | 606 | ipc/shm.c | static pte_t shm_swap_in(struct vm_area_struct * shmd, unsigned long offset, unsigned long code) |
shmd | 613 | ipc/shm.c | if (id != (SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK)) { |
shmd | 615 | ipc/shm.c | id, SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK); |
shmd | 669 | ipc/shm.c | return pte_modify(pte, shmd->vm_page_prot); |
shmd | 682 | ipc/shm.c | struct vm_area_struct *shmd; |
shmd | 725 | ipc/shm.c | for (shmd = shp->attaches; ; ) { |
shmd | 732 | ipc/shm.c | if ((SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK) != id) { |
shmd | 734 | ipc/shm.c | id, SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK); |
shmd | 737 | ipc/shm.c | tmp = shmd->vm_start + (idx << PAGE_SHIFT) - shmd->vm_offset; |
shmd | 738 | ipc/shm.c | if (!(tmp >= shmd->vm_start && tmp < shmd->vm_end)) |
shmd | 740 | ipc/shm.c | page_dir = pgd_offset(shmd->vm_mm,tmp); |
shmd | 743 | ipc/shm.c | id, shmd->vm_start, idx); |
shmd | 750 | ipc/shm.c | id, shmd->vm_start, idx); |
shmd | 764 | ipc/shm.c | flush_cache_page(shmd, tmp); |
shmd | 766 | ipc/shm.c | __pte(shmd->vm_pte + SWP_ENTRY(0, idx << SHM_IDX_SHIFT))); |
shmd | 768 | ipc/shm.c | if (shmd->vm_mm->rss > 0) |
shmd | 769 | ipc/shm.c | shmd->vm_mm->rss--; |
shmd | 770 | ipc/shm.c | flush_tlb_page(shmd, tmp); |
shmd | 773 | ipc/shm.c | if ((shmd = shmd->vm_next_share) == shp->attaches) |
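
The listing above shows shmd used throughout ipc/shm.c as the vm_area_struct describing one attach of a shared memory segment. Lines 377-410 keep every such attach on shp->attaches, a circular doubly linked list threaded through vm_next_share/vm_prev_share. The sketch below condenses that list handling into stand-alone C (the debug printk at lines 394-403 is dropped, and the stub structs stand in for the real kernel types); it illustrates the pattern, it is not the kernel code itself.

#include <stddef.h>

/* stand-ins for struct vm_area_struct and struct shmid_ds */
struct vma_stub {
	struct vma_stub *vm_next_share;
	struct vma_stub *vm_prev_share;
};

struct shm_stub {
	struct vma_stub *attaches;	/* head of the circular list, or NULL */
};

void insert_attach_sketch(struct shm_stub *shp, struct vma_stub *shmd)
{
	struct vma_stub *attaches = shp->attaches;

	if (attaches) {
		/* link shmd in just before the current head (lines 382-385) */
		shmd->vm_next_share = attaches;
		shmd->vm_prev_share = attaches->vm_prev_share;
		shmd->vm_prev_share->vm_next_share = shmd;
		attaches->vm_prev_share = shmd;
	} else {
		/* first attach: a one-element ring pointing at itself (line 387) */
		shp->attaches = shmd->vm_next_share = shmd->vm_prev_share = shmd;
	}
}

void remove_attach_sketch(struct shm_stub *shp, struct vma_stub *shmd)
{
	if (shmd->vm_next_share == shmd) {
		/* last attach on the ring: the list becomes empty (line 393) */
		shp->attaches = NULL;
	} else {
		/* advance the head if it pointed at shmd, then unlink (lines 407-410) */
		if (shp->attaches == shmd)
			shp->attaches = shmd->vm_next_share;
		shmd->vm_prev_share->vm_next_share = shmd->vm_next_share;
		shmd->vm_next_share->vm_prev_share = shmd->vm_prev_share;
	}
}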
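
Lines 517, 434-435 and 766 show how shmd->vm_pte doubles as a tag: it is a fake swap entry whose offset field carries the segment id, and the per-page entries written into the page tables add the page index shifted left by SHM_IDX_SHIFT, so that shm_swap_in (lines 613-615) can recover both halves from the faulting entry. The small program below only illustrates that packing arithmetic; the bit widths and helper names are invented for the example, the real ones come from <asm/shmparam.h> and the swap-entry macros.

#include <stdio.h>

#define X_SHM_ID_MASK   0x0fffUL	/* illustrative: 12-bit segment id */
#define X_SHM_IDX_SHIFT 12		/* illustrative: page index sits above the id */

/* stand-ins for SWP_ENTRY()/SWP_OFFSET(); only the offset part is modelled */
static unsigned long x_swp_entry(unsigned long offset) { return offset; }
static unsigned long x_swp_offset(unsigned long entry) { return entry; }

int main(void)
{
	unsigned long id  = 5;		/* shm segment id */
	unsigned long idx = 37;		/* page index within the segment */

	/* shmat(): vm_pte remembers which segment this attach belongs to (line 517) */
	unsigned long vm_pte = x_swp_entry(id);

	/* shm_map()/shm_swap(): the per-page entry adds the page index (lines 434-435, 766) */
	unsigned long page_code = vm_pte + x_swp_entry(idx << X_SHM_IDX_SHIFT);

	/* shm_swap_in(): both halves can be recovered from the entry (lines 613-615) */
	printf("id  = %lu\n", x_swp_offset(page_code) & X_SHM_ID_MASK);
	printf("idx = %lu\n", x_swp_offset(page_code) >> X_SHM_IDX_SHIFT);
	return 0;
}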
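
Finally, lines 737-738 map a segment page index back to a user address inside one particular attach, compensating for vm_offset so that attaches that no longer start at the beginning of the segment are handled. A minimal sketch of that arithmetic, assuming a 4 KiB page size for illustration:

#define X_PAGE_SHIFT 12		/* illustrative 4 KiB pages */

struct attach_range {
	unsigned long vm_start;		/* first mapped address */
	unsigned long vm_end;		/* one past the last mapped address */
	unsigned long vm_offset;	/* byte offset of vm_start into the segment */
};

/* Return the user address of segment page idx in this attach,
 * or 0 if the attach does not cover that page (lines 737-738). */
unsigned long page_addr_in_attach(const struct attach_range *shmd, unsigned long idx)
{
	unsigned long tmp = shmd->vm_start + (idx << X_PAGE_SHIFT) - shmd->vm_offset;

	if (tmp >= shmd->vm_start && tmp < shmd->vm_end)
		return tmp;
	return 0;
}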