tag | line | file | source code |
shmd | 20 | ipc/shm.c | static int shm_map (struct shm_desc *shmd, int remap); |
shmd | 331 | ipc/shm.c | static int shm_map (struct shm_desc *shmd, int remap) |
shmd | 336 | ipc/shm.c | unsigned long page_dir = shmd->task->tss.cr3; |
shmd | 339 | ipc/shm.c | for (tmp = shmd->start; tmp < shmd->end; tmp += PAGE_SIZE) { |
shmd | 368 | ipc/shm.c | shm_sgn = shmd->shm_sgn; |
shmd | 369 | ipc/shm.c | for (tmp = shmd->start; tmp < shmd->end; tmp += PAGE_SIZE, |
shmd | 431 | ipc/shm.c | struct shm_desc *shmd; |
shmd | 454 | ipc/shm.c | for (shmd = current->shm; shmd; shmd = shmd->task_next) { |
shmd | 455 | ipc/shm.c | if (shmd->start < SHM_RANGE_START) |
shmd | 457 | ipc/shm.c | if (addr >= shmd->start) |
shmd | 458 | ipc/shm.c | addr = shmd->start; |
shmd | 470 | ipc/shm.c | for (shmd = current->shm; shmd; shmd = shmd->task_next) { |
shmd | 471 | ipc/shm.c | if (addr >= shmd->start && addr < shmd->end) |
shmd | 473 | ipc/shm.c | if (addr + shp->shm_segsz >= shmd->start && |
shmd | 474 | ipc/shm.c | addr + shp->shm_segsz < shmd->end) |
shmd | 483 | ipc/shm.c | shmd = (struct shm_desc *) kmalloc (sizeof(*shmd), GFP_KERNEL); |
shmd | 484 | ipc/shm.c | if (!shmd) |
shmd | 487 | ipc/shm.c | kfree(shmd); |
shmd | 490 | ipc/shm.c | shmd->shm_sgn = (SHM_SWP_TYPE << 1) | (id << SHM_ID_SHIFT) | |
shmd | 492 | ipc/shm.c | shmd->start = addr; |
shmd | 493 | ipc/shm.c | shmd->end = addr + shp->shm_npages * PAGE_SIZE; |
shmd | 494 | ipc/shm.c | shmd->task = current; |
shmd | 496 | ipc/shm.c | if ((err = add_vm_area(shmd->start, shmd->end - shmd->start, shmflg & SHM_RDONLY))) { |
shmd | 497 | ipc/shm.c | kfree(shmd); |
shmd | 503 | ipc/shm.c | if ((err = shm_map (shmd, shmflg & SHM_REMAP))) { |
shmd | 506 | ipc/shm.c | kfree(shmd); |
shmd | 510 | ipc/shm.c | shmd->task_next = current->shm; |
shmd | 511 | ipc/shm.c | current->shm = shmd; |
shmd | 512 | ipc/shm.c | shmd->seg_next = shp->attaches; |
shmd | 513 | ipc/shm.c | shp->attaches = shmd; |
shmd | 529 | ipc/shm.c | struct shm_desc *shmd = *shmdp; |
shmd | 533 | ipc/shm.c | id = (shmd->shm_sgn >> SHM_ID_SHIFT) & SHM_ID_MASK; |
shmd | 535 | ipc/shm.c | *shmdp = shmd->task_next; |
shmd | 537 | ipc/shm.c | if (*shmdp == shmd) { |
shmd | 538 | ipc/shm.c | *shmdp = shmd->seg_next; |
shmd | 544 | ipc/shm.c | do_munmap(shmd->start, shp->shm_segsz); |
shmd | 545 | ipc/shm.c | kfree(shmd); |
shmd | 559 | ipc/shm.c | struct shm_desc *shmd, **shmdp; |
shmd | 561 | ipc/shm.c | for (shmdp = &current->shm; (shmd = *shmdp); shmdp=&shmd->task_next) { |
shmd | 562 | ipc/shm.c | if (shmd->start == (ulong) shmaddr) { |
shmd | 587 | ipc/shm.c | struct shm_desc *shmd, *new_desc = NULL, *tmp; |
shmd | 595 | ipc/shm.c | for (shmd = p1->shm; shmd; shmd = shmd->task_next) { |
shmd | 606 | ipc/shm.c | *tmp = *shmd; |
shmd | 612 | ipc/shm.c | for (shmd = new_desc; shmd; shmd = shmd->task_next) { |
shmd | 613 | ipc/shm.c | id = (shmd->shm_sgn >> SHM_ID_SHIFT) & SHM_ID_MASK; |
shmd | 619 | ipc/shm.c | shmd->seg_next = shp->attaches; |
shmd | 620 | ipc/shm.c | shp->attaches = shmd; |
shmd | 695 | ipc/shm.c | struct shm_desc *shmd; |
shmd | 734 | ipc/shm.c | for (shmd = shp->attaches; shmd; shmd = shmd->seg_next) { |
shmd | 736 | ipc/shm.c | if ((shmd->shm_sgn >> SHM_ID_SHIFT & SHM_ID_MASK) != id) { |
shmd | 740 | ipc/shm.c | tmp = shmd->start + (idx << PAGE_SHIFT); |
shmd | 741 | ipc/shm.c | if (tmp >= shmd->end) { |
shmd | 745 | ipc/shm.c | pte = PAGE_DIR_OFFSET(shmd->task->tss.cr3,tmp); |
shmd | 748 | ipc/shm.c | id, shmd->start, idx); |
shmd | 761 | ipc/shm.c | tmp = shmd->shm_sgn | idx << SHM_IDX_SHIFT; |
shmd | 764 | ipc/shm.c | shmd->task->mm->rss--; |
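For orientation, the shm_desc fields touched by the references above can be pulled together into one declaration. This is a sketch reconstructed from the listing, not the kernel's authoritative definition: the field names come from the source lines shown, while the types and field order are assumptions.

/*
 * Sketch of struct shm_desc as implied by the cross-references above.
 * Field names are taken from the listed source lines; types and ordering
 * are assumed, not copied from the kernel header.
 */
struct task_struct;	/* forward declaration for the sketch */

struct shm_desc {
	struct task_struct *task;	/* attaching task; shm_map() and the swap path read task->tss.cr3 and task->mm->rss through it */
	unsigned long shm_sgn;		/* swap-entry signature: SHM_SWP_TYPE, segment id and page index bits */
	unsigned long start;		/* attach address, page aligned */
	unsigned long end;		/* start + shp->shm_npages * PAGE_SIZE */
	struct shm_desc *task_next;	/* next attach on the per-task list (current->shm) */
	struct shm_desc *seg_next;	/* next attach on the per-segment list (shp->attaches) */
};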