tag | line | file | source code |
--- | --- | --- | --- |
shmd | 20 | ipc/shm.c | static int shm_map (struct shm_desc *shmd, int remap); |
shmd | 330 | ipc/shm.c | static int shm_map (struct shm_desc *shmd, int remap) |
shmd | 335 | ipc/shm.c | unsigned long page_dir = shmd->task->tss.cr3; |
shmd | 338 | ipc/shm.c | for (tmp = shmd->start; tmp < shmd->end; tmp += PAGE_SIZE) { |
shmd | 367 | ipc/shm.c | shm_sgn = shmd->shm_sgn; |
shmd | 368 | ipc/shm.c | for (tmp = shmd->start; tmp < shmd->end; tmp += PAGE_SIZE, |
shmd | 386 | ipc/shm.c | struct shm_desc *shmd; |
shmd | 403 | ipc/shm.c | for (shmd = current->shm; shmd; shmd = shmd->task_next) { |
shmd | 404 | ipc/shm.c | if (shmd->start < SHM_RANGE_START) |
shmd | 406 | ipc/shm.c | if (addr >= shmd->start) |
shmd | 407 | ipc/shm.c | addr = shmd->start; |
shmd | 419 | ipc/shm.c | for (shmd = current->shm; shmd; shmd = shmd->task_next) { |
shmd | 420 | ipc/shm.c | if (addr >= shmd->start && addr < shmd->end) |
shmd | 422 | ipc/shm.c | if (addr + shp->shm_segsz >= shmd->start && |
shmd | 423 | ipc/shm.c | addr + shp->shm_segsz < shmd->end) |
shmd | 432 | ipc/shm.c | shmd = (struct shm_desc *) kmalloc (sizeof(*shmd), GFP_KERNEL); |
shmd | 433 | ipc/shm.c | if (!shmd) |
shmd | 436 | ipc/shm.c | kfree_s (shmd, sizeof (*shmd)); |
shmd | 439 | ipc/shm.c | shmd->shm_sgn = (SHM_SWP_TYPE << 1) | (id << SHM_ID_SHIFT) | |
shmd | 441 | ipc/shm.c | shmd->start = addr; |
shmd | 442 | ipc/shm.c | shmd->end = addr + shp->shm_npages * PAGE_SIZE; |
shmd | 443 | ipc/shm.c | shmd->task = current; |
shmd | 452 | ipc/shm.c | if ((err = shm_map (shmd, shmflg & SHM_REMAP))) { |
shmd | 455 | ipc/shm.c | kfree_s (shmd, sizeof (*shmd)); |
shmd | 459 | ipc/shm.c | shmd->task_next = current->shm; |
shmd | 460 | ipc/shm.c | current->shm = shmd; |
shmd | 461 | ipc/shm.c | shmd->seg_next = shp->attaches; |
shmd | 462 | ipc/shm.c | shp->attaches = shmd; |
shmd | 476 | ipc/shm.c | struct shm_desc *shmd = *shmdp; |
shmd | 480 | ipc/shm.c | id = (shmd->shm_sgn >> SHM_ID_SHIFT) & SHM_ID_MASK; |
shmd | 482 | ipc/shm.c | *shmdp = shmd->task_next; |
shmd | 484 | ipc/shm.c | if (*shmdp == shmd) { |
shmd | 485 | ipc/shm.c | *shmdp = shmd->seg_next; |
shmd | 491 | ipc/shm.c | unmap_page_range (shmd->start, shp->shm_segsz); /* sleeps */ |
shmd | 492 | ipc/shm.c | kfree_s (shmd, sizeof (*shmd)); |
shmd | 506 | ipc/shm.c | struct shm_desc *shmd, **shmdp; |
shmd | 508 | ipc/shm.c | for (shmdp = &current->shm; (shmd = *shmdp); shmdp=&shmd->task_next) { |
shmd | 509 | ipc/shm.c | if (shmd->start == (ulong) shmaddr) { |
shmd | 534 | ipc/shm.c | struct shm_desc *shmd, *new_desc = NULL, *tmp; |
shmd | 540 | ipc/shm.c | for (shmd = p1->shm; shmd; shmd = shmd->task_next) { |
shmd | 551 | ipc/shm.c | *tmp = *shmd; |
shmd | 557 | ipc/shm.c | for (shmd = new_desc; shmd; shmd = shmd->task_next) { |
shmd | 558 | ipc/shm.c | id = (shmd->shm_sgn >> SHM_ID_SHIFT) & SHM_ID_MASK; |
shmd | 564 | ipc/shm.c | shmd->seg_next = shp->attaches; |
shmd | 565 | ipc/shm.c | shp->attaches = shmd; |
shmd | 643 | ipc/shm.c | struct shm_desc *shmd; |
shmd | 682 | ipc/shm.c | for (shmd = shp->attaches; shmd; shmd = shmd->seg_next) { |
shmd | 684 | ipc/shm.c | if ((shmd->shm_sgn >> SHM_ID_SHIFT & SHM_ID_MASK) != id) { |
shmd | 688 | ipc/shm.c | tmp = shmd->start + (idx << PAGE_SHIFT); |
shmd | 689 | ipc/shm.c | if (tmp >= shmd->end) { |
shmd | 693 | ipc/shm.c | pte = PAGE_DIR_OFFSET(shmd->task->tss.cr3,tmp); |
shmd | 696 | ipc/shm.c | id, shmd->start, idx); |
shmd | 709 | ipc/shm.c | tmp = shmd->shm_sgn | idx << SHM_IDX_SHIFT; |
shmd | 712 | ipc/shm.c | shmd->task->rss--; |
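Every occurrence above dereferences a `struct shm_desc *`. For orientation, below is a minimal sketch of the descriptor as it can be inferred from the field accesses in this listing (`task`, `shm_sgn`, `start`, `end`, `task_next`, `seg_next`); the real definition in the kernel's shm headers is authoritative, and its field order, exact types, and any extra members are assumptions here.

```c
/* Sketch of the per-attach descriptor, inferred from the accesses above.
 * This is not copied from the kernel headers; types and ordering are assumed.
 */
struct task_struct;			/* forward declaration; defined elsewhere in the kernel */

struct shm_desc {
	struct task_struct *task;	/* attaching task (tss.cr3 and rss are read through it) */
	unsigned long shm_sgn;		/* swap-style signature: SHM_SWP_TYPE, segment id, flags */
	unsigned long start;		/* first virtual address of the attach */
	unsigned long end;		/* address just past the last attached page */
	struct shm_desc *task_next;	/* next attach on the per-task list (current->shm) */
	struct shm_desc *seg_next;	/* next attach on the per-segment list (shp->attaches) */
};
```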