tag | line | file | source code |
shmd | 20 | ipc/shm.c | static int shm_map (struct shm_desc *shmd, int remap); |
shmd | 330 | ipc/shm.c | static int shm_map (struct shm_desc *shmd, int remap) |
shmd | 335 | ipc/shm.c | unsigned long page_dir = shmd->task->tss.cr3; |
shmd | 338 | ipc/shm.c | for (tmp = shmd->start; tmp < shmd->end; tmp += PAGE_SIZE) { |
shmd | 367 | ipc/shm.c | shm_sgn = shmd->shm_sgn; |
shmd | 368 | ipc/shm.c | for (tmp = shmd->start; tmp < shmd->end; tmp += PAGE_SIZE, |
shmd | 386 | ipc/shm.c | struct shm_desc *shmd; |
shmd | 409 | ipc/shm.c | for (shmd = current->shm; shmd; shmd = shmd->task_next) { |
shmd | 410 | ipc/shm.c | if (shmd->start < SHM_RANGE_START) |
shmd | 412 | ipc/shm.c | if (addr >= shmd->start) |
shmd | 413 | ipc/shm.c | addr = shmd->start; |
shmd | 425 | ipc/shm.c | for (shmd = current->shm; shmd; shmd = shmd->task_next) { |
shmd | 426 | ipc/shm.c | if (addr >= shmd->start && addr < shmd->end) |
shmd | 428 | ipc/shm.c | if (addr + shp->shm_segsz >= shmd->start && |
shmd | 429 | ipc/shm.c | addr + shp->shm_segsz < shmd->end) |
shmd | 438 | ipc/shm.c | shmd = (struct shm_desc *) kmalloc (sizeof(*shmd), GFP_KERNEL); |
shmd | 439 | ipc/shm.c | if (!shmd) |
shmd | 442 | ipc/shm.c | kfree_s (shmd, sizeof (*shmd)); |
shmd | 445 | ipc/shm.c | shmd->shm_sgn = (SHM_SWP_TYPE << 1) | (id << SHM_ID_SHIFT) | |
shmd | 447 | ipc/shm.c | shmd->start = addr; |
shmd | 448 | ipc/shm.c | shmd->end = addr + shp->shm_npages * PAGE_SIZE; |
shmd | 449 | ipc/shm.c | shmd->task = current; |
shmd | 458 | ipc/shm.c | if ((err = shm_map (shmd, shmflg & SHM_REMAP))) { |
shmd | 461 | ipc/shm.c | kfree_s (shmd, sizeof (*shmd)); |
shmd | 465 | ipc/shm.c | shmd->task_next = current->shm; |
shmd | 466 | ipc/shm.c | current->shm = shmd; |
shmd | 467 | ipc/shm.c | shmd->seg_next = shp->attaches; |
shmd | 468 | ipc/shm.c | shp->attaches = shmd; |
shmd | 484 | ipc/shm.c | struct shm_desc *shmd = *shmdp; |
shmd | 488 | ipc/shm.c | id = (shmd->shm_sgn >> SHM_ID_SHIFT) & SHM_ID_MASK; |
shmd | 490 | ipc/shm.c | *shmdp = shmd->task_next; |
shmd | 492 | ipc/shm.c | if (*shmdp == shmd) { |
shmd | 493 | ipc/shm.c | *shmdp = shmd->seg_next; |
shmd | 499 | ipc/shm.c | unmap_page_range (shmd->start, shp->shm_segsz); /* sleeps */ |
shmd | 500 | ipc/shm.c | kfree_s (shmd, sizeof (*shmd)); |
shmd | 514 | ipc/shm.c | struct shm_desc *shmd, **shmdp; |
shmd | 516 | ipc/shm.c | for (shmdp = &current->shm; (shmd = *shmdp); shmdp=&shmd->task_next) { |
shmd | 517 | ipc/shm.c | if (shmd->start == (ulong) shmaddr) { |
shmd | 542 | ipc/shm.c | struct shm_desc *shmd, *new_desc = NULL, *tmp; |
shmd | 548 | ipc/shm.c | for (shmd = p1->shm; shmd; shmd = shmd->task_next) { |
shmd | 559 | ipc/shm.c | *tmp = *shmd; |
shmd | 565 | ipc/shm.c | for (shmd = new_desc; shmd; shmd = shmd->task_next) { |
shmd | 566 | ipc/shm.c | id = (shmd->shm_sgn >> SHM_ID_SHIFT) & SHM_ID_MASK; |
shmd | 572 | ipc/shm.c | shmd->seg_next = shp->attaches; |
shmd | 573 | ipc/shm.c | shp->attaches = shmd; |
shmd | 651 | ipc/shm.c | struct shm_desc *shmd; |
shmd | 690 | ipc/shm.c | for (shmd = shp->attaches; shmd; shmd = shmd->seg_next) { |
shmd | 692 | ipc/shm.c | if ((shmd->shm_sgn >> SHM_ID_SHIFT & SHM_ID_MASK) != id) { |
shmd | 696 | ipc/shm.c | tmp = shmd->start + (idx << PAGE_SHIFT); |
shmd | 697 | ipc/shm.c | if (tmp >= shmd->end) { |
shmd | 701 | ipc/shm.c | pte = PAGE_DIR_OFFSET(shmd->task->tss.cr3,tmp); |
shmd | 704 | ipc/shm.c | id, shmd->start, idx); |
shmd | 717 | ipc/shm.c | tmp = shmd->shm_sgn | idx << SHM_IDX_SHIFT; |
shmd | 720 | ipc/shm.c | shmd->task->rss--; |