This source file includes the following definitions:
- shm_init
- findkey
- newseg
- sys_shmget
- killseg
- sys_shmctl
- insert_attach
- remove_attach
- shm_map
- sys_shmat
- shm_open
- shm_close
- sys_shmdt
- shm_swap_in
- shm_swap
1
2
3
4
5
6
7
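/* System V shared memory: segment creation (shmget), attach/detach
   (shmat/shmdt), control (shmctl), and swap-in/swap-out of segment pages. */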
8 #include <linux/errno.h>
9 #include <linux/sched.h>
10 #include <linux/mm.h>
11 #include <linux/ipc.h>
12 #include <linux/shm.h>
13 #include <linux/stat.h>
14 #include <linux/malloc.h>
15 #include <linux/swap.h>
16
17 #include <asm/segment.h>
18 #include <asm/pgtable.h>
19
20 extern int ipcperms (struct ipc_perm *ipcp, short shmflg);
21 extern unsigned long get_swap_page (void);
22 static int findkey (key_t key);
23 static int newseg (key_t key, int shmflg, int size);
24 static int shm_map (struct vm_area_struct *shmd);
25 static void killseg (int id);
26 static void shm_open (struct vm_area_struct *shmd);
27 static void shm_close (struct vm_area_struct *shmd);
28 static pte_t shm_swap_in(struct vm_area_struct *, unsigned long, unsigned long);
29
30 static int shm_tot = 0;
31 static int shm_rss = 0;
32 static int shm_swp = 0;
33 static int max_shmid = 0;
34 static struct wait_queue *shm_lock = NULL;
35 static struct shmid_ds *shm_segs[SHMMNI];
36
37 static unsigned short shm_seq = 0;
38
39
40 static ulong swap_attempts = 0;
41 static ulong swap_successes = 0;
42 static ulong used_segs = 0;
43
44 void shm_init (void)
45 {
46 int id;
47
48 for (id = 0; id < SHMMNI; id++)
49 shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
50 shm_tot = shm_rss = shm_seq = max_shmid = used_segs = 0;
51 shm_lock = NULL;
52 return;
53 }
54
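/* A slot in shm_segs[] holds IPC_UNUSED while it is free and IPC_NOID while
   newseg() is still setting it up; findkey() sleeps on shm_lock until the
   slot leaves the NOID state before comparing keys. */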
55 static int findkey (key_t key)
56 {
57 int id;
58 struct shmid_ds *shp;
59
60 for (id = 0; id <= max_shmid; id++) {
61 while ((shp = shm_segs[id]) == IPC_NOID)
62 sleep_on (&shm_lock);
63 if (shp == IPC_UNUSED)
64 continue;
65 if (key == shp->shm_perm.key)
66 return id;
67 }
68 return -1;
69 }
70
71
72
73
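/* Reserve a free slot by writing IPC_NOID, then allocate the shmid_ds and the
   per-page array; anyone sleeping on shm_lock is woken once the slot is
   either filled in or released again on failure. */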
74 static int newseg (key_t key, int shmflg, int size)
75 {
76 struct shmid_ds *shp;
77 int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
78 int id, i;
79
80 if (size < SHMMIN)
81 return -EINVAL;
82 if (shm_tot + numpages >= SHMALL)
83 return -ENOSPC;
84 for (id = 0; id < SHMMNI; id++)
85 if (shm_segs[id] == IPC_UNUSED) {
86 shm_segs[id] = (struct shmid_ds *) IPC_NOID;
87 goto found;
88 }
89 return -ENOSPC;
90
91 found:
92 shp = (struct shmid_ds *) kmalloc (sizeof (*shp), GFP_KERNEL);
93 if (!shp) {
94 shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
95 if (shm_lock)
96 wake_up (&shm_lock);
97 return -ENOMEM;
98 }
99
100 shp->shm_pages = (ulong *) kmalloc (numpages*sizeof(ulong),GFP_KERNEL);
101 if (!shp->shm_pages) {
102 shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
103 if (shm_lock)
104 wake_up (&shm_lock);
105 kfree(shp);
106 return -ENOMEM;
107 }
108
109 for (i = 0; i < numpages; shp->shm_pages[i++] = 0);
110 shm_tot += numpages;
111 shp->shm_perm.key = key;
112 shp->shm_perm.mode = (shmflg & S_IRWXUGO);
113 shp->shm_perm.cuid = shp->shm_perm.uid = current->euid;
114 shp->shm_perm.cgid = shp->shm_perm.gid = current->egid;
115 shp->shm_perm.seq = shm_seq;
116 shp->shm_segsz = size;
117 shp->shm_cpid = current->pid;
118 shp->attaches = NULL;
119 shp->shm_lpid = shp->shm_nattch = 0;
120 shp->shm_atime = shp->shm_dtime = 0;
121 shp->shm_ctime = CURRENT_TIME;
122 shp->shm_npages = numpages;
123
124 if (id > max_shmid)
125 max_shmid = id;
126 shm_segs[id] = shp;
127 used_segs++;
128 if (shm_lock)
129 wake_up (&shm_lock);
130 return (unsigned int) shp->shm_perm.seq * SHMMNI + id;
131 }
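/* The identifier handed back to user space packs a per-slot sequence number
   with the slot index: shmid = seq * SHMMNI + id.  sys_shmctl() and
   sys_shmat() recover the slot with shmid % SHMMNI and check the sequence
   against shmid / SHMMNI, so a stale identifier for a destroyed segment is
   rejected with -EIDRM. */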
132
133 asmlinkage int sys_shmget (key_t key, int size, int shmflg)
134 {
135 struct shmid_ds *shp;
136 int id = 0;
137
138 if (size < 0 || size > SHMMAX)
139 return -EINVAL;
140 if (key == IPC_PRIVATE)
141 return newseg(key, shmflg, size);
142 if ((id = findkey (key)) == -1) {
143 if (!(shmflg & IPC_CREAT))
144 return -ENOENT;
145 return newseg(key, shmflg, size);
146 }
147 if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL))
148 return -EEXIST;
149 shp = shm_segs[id];
150 if (shp->shm_perm.mode & SHM_DEST)
151 return -EIDRM;
152 if (size > shp->shm_segsz)
153 return -EINVAL;
154 if (ipcperms (&shp->shm_perm, shmflg))
155 return -EACCES;
156 return (unsigned int) shp->shm_perm.seq * SHMMNI + id;
157 }
158
159
160
161
162
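/* killseg() is reached only when the last attach is gone and SHM_DEST is set
   (or from IPC_RMID with no attaches): it bumps the sequence number, frees
   the slot, and releases every page, returning resident pages to the page
   allocator and on-swap pages to the swap map. */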
163 static void killseg (int id)
164 {
165 struct shmid_ds *shp;
166 int i, numpages;
167
168 shp = shm_segs[id];
169 if (shp == IPC_NOID || shp == IPC_UNUSED) {
170 printk ("shm nono: killseg called on unused seg id=%d\n", id);
171 return;
172 }
173 shp->shm_perm.seq++;
174 shm_seq = (shm_seq+1) % ((unsigned)(1<<31)/SHMMNI);
175 shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
176 used_segs--;
177 if (id == max_shmid)
178 while (max_shmid && (shm_segs[--max_shmid] == IPC_UNUSED));
179 if (!shp->shm_pages) {
180 printk ("shm nono: killseg shp->pages=NULL. id=%d\n", id);
181 return;
182 }
183 numpages = shp->shm_npages;
184 for (i = 0; i < numpages ; i++) {
185 pte_t pte;
186 pte_val(pte) = shp->shm_pages[i];
187 if (pte_none(pte))
188 continue;
189 if (pte_present(pte)) {
190 free_page (pte_page(pte));
191 shm_rss--;
192 } else {
193 swap_free(pte_val(pte));
194 shm_swp--;
195 }
196 }
197 kfree(shp->shm_pages);
198 shm_tot -= numpages;
199 kfree(shp);
200 return;
201 }
202
203 asmlinkage int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
204 {
205 struct shmid_ds tbuf;
206 struct shmid_ds *shp;
207 struct ipc_perm *ipcp;
208 int id, err;
209
210 if (cmd < 0 || shmid < 0)
211 return -EINVAL;
212 if (cmd == IPC_SET) {
213 if (!buf)
214 return -EFAULT;
215 err = verify_area (VERIFY_READ, buf, sizeof (*buf));
216 if (err)
217 return err;
218 memcpy_fromfs (&tbuf, buf, sizeof (*buf));
219 }
220
221 switch (cmd) {
222 case IPC_INFO:
223 {
224 struct shminfo shminfo;
225 if (!buf)
226 return -EFAULT;
227 shminfo.shmmni = SHMMNI;
228 shminfo.shmmax = SHMMAX;
229 shminfo.shmmin = SHMMIN;
230 shminfo.shmall = SHMALL;
231 shminfo.shmseg = SHMSEG;
232 err = verify_area (VERIFY_WRITE, buf, sizeof (struct shminfo));
233 if (err)
234 return err;
235 memcpy_tofs (buf, &shminfo, sizeof(struct shminfo));
236 return max_shmid;
237 }
238 case SHM_INFO:
239 {
240 struct shm_info shm_info;
241 if (!buf)
242 return -EFAULT;
243 err = verify_area (VERIFY_WRITE, buf, sizeof (shm_info));
244 if (err)
245 return err;
246 shm_info.used_ids = used_segs;
247 shm_info.shm_rss = shm_rss;
248 shm_info.shm_tot = shm_tot;
249 shm_info.shm_swp = shm_swp;
250 shm_info.swap_attempts = swap_attempts;
251 shm_info.swap_successes = swap_successes;
252 memcpy_tofs (buf, &shm_info, sizeof(shm_info));
253 return max_shmid;
254 }
255 case SHM_STAT:
256 if (!buf)
257 return -EFAULT;
258 err = verify_area (VERIFY_WRITE, buf, sizeof (*buf));
259 if (err)
260 return err;
261 if (shmid > max_shmid)
262 return -EINVAL;
263 shp = shm_segs[shmid];
264 if (shp == IPC_UNUSED || shp == IPC_NOID)
265 return -EINVAL;
266 if (ipcperms (&shp->shm_perm, S_IRUGO))
267 return -EACCES;
268 id = (unsigned int) shp->shm_perm.seq * SHMMNI + shmid;
269 tbuf.shm_perm = shp->shm_perm;
270 tbuf.shm_segsz = shp->shm_segsz;
271 tbuf.shm_atime = shp->shm_atime;
272 tbuf.shm_dtime = shp->shm_dtime;
273 tbuf.shm_ctime = shp->shm_ctime;
274 tbuf.shm_cpid = shp->shm_cpid;
275 tbuf.shm_lpid = shp->shm_lpid;
276 tbuf.shm_nattch = shp->shm_nattch;
277 memcpy_tofs (buf, &tbuf, sizeof(*buf));
278 return id;
279 }
280
281 shp = shm_segs[id = (unsigned int) shmid % SHMMNI];
282 if (shp == IPC_UNUSED || shp == IPC_NOID)
283 return -EINVAL;
284 if (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)
285 return -EIDRM;
286 ipcp = &shp->shm_perm;
287
288 switch (cmd) {
289 case SHM_UNLOCK:
290 if (!suser())
291 return -EPERM;
292 if (!(ipcp->mode & SHM_LOCKED))
293 return -EINVAL;
294 ipcp->mode &= ~SHM_LOCKED;
295 break;
296 case SHM_LOCK:
297
298
299
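/* Only the superuser may pin a segment; shm_swap() below skips any segment
   whose mode has SHM_LOCKED set, so locked pages are never swapped out. */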
300 if (!suser())
301 return -EPERM;
302 if (ipcp->mode & SHM_LOCKED)
303 return -EINVAL;
304 ipcp->mode |= SHM_LOCKED;
305 break;
306 case IPC_STAT:
307 if (ipcperms (ipcp, S_IRUGO))
308 return -EACCES;
309 if (!buf)
310 return -EFAULT;
311 err = verify_area (VERIFY_WRITE, buf, sizeof (*buf));
312 if (err)
313 return err;
314 tbuf.shm_perm = shp->shm_perm;
315 tbuf.shm_segsz = shp->shm_segsz;
316 tbuf.shm_atime = shp->shm_atime;
317 tbuf.shm_dtime = shp->shm_dtime;
318 tbuf.shm_ctime = shp->shm_ctime;
319 tbuf.shm_cpid = shp->shm_cpid;
320 tbuf.shm_lpid = shp->shm_lpid;
321 tbuf.shm_nattch = shp->shm_nattch;
322 memcpy_tofs (buf, &tbuf, sizeof(*buf));
323 break;
324 case IPC_SET:
325 if (suser() || current->euid == shp->shm_perm.uid ||
326 current->euid == shp->shm_perm.cuid) {
327 ipcp->uid = tbuf.shm_perm.uid;
328 ipcp->gid = tbuf.shm_perm.gid;
329 ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
330 | (tbuf.shm_perm.mode & S_IRWXUGO);
331 shp->shm_ctime = CURRENT_TIME;
332 break;
333 }
334 return -EPERM;
335 case IPC_RMID:
336 if (suser() || current->euid == shp->shm_perm.uid ||
337 current->euid == shp->shm_perm.cuid) {
338 shp->shm_perm.mode |= SHM_DEST;
339 if (shp->shm_nattch <= 0)
340 killseg (id);
341 break;
342 }
343 return -EPERM;
344 default:
345 return -EINVAL;
346 }
347 return 0;
348 }
349
350
351
352
353
354
355
356
357
358
359
360
361
362
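/* Only three operations are needed for shm mappings.  In this kernel
   generation the vm_operations_struct slots are, in order: open, close,
   unmap, protect, sync, advise, nopage, wppage, swapout and swapin, so the
   initializer below installs shm_open, shm_close and shm_swap_in and leaves
   the remaining slots NULL. */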
363 static struct vm_operations_struct shm_vm_ops = {
364 shm_open,
365 shm_close,
366 NULL,
367 NULL,
368 NULL,
369 NULL,
370 NULL,
371 NULL,
372 NULL,
373 shm_swap_in
374 };
375
376
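/* Every attached vm_area_struct of a segment sits on a circular, doubly
   linked list threaded through vm_next_share/vm_prev_share, with
   shp->attaches pointing at one element (NULL when nothing is attached). */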
377 static inline void insert_attach (struct shmid_ds * shp, struct vm_area_struct * shmd)
378 {
379 struct vm_area_struct * attaches;
380
381 if ((attaches = shp->attaches)) {
382 shmd->vm_next_share = attaches;
383 shmd->vm_prev_share = attaches->vm_prev_share;
384 shmd->vm_prev_share->vm_next_share = shmd;
385 attaches->vm_prev_share = shmd;
386 } else
387 shp->attaches = shmd->vm_next_share = shmd->vm_prev_share = shmd;
388 }
389
390
391 static inline void remove_attach (struct shmid_ds * shp, struct vm_area_struct * shmd)
392 {
393 if (shmd->vm_next_share == shmd) {
394 if (shp->attaches != shmd) {
395 printk("shm_close: shm segment (id=%ld) attach list inconsistent\n",
396 SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK);
397 printk("shm_close: %08lx-%08lx %c%c%c%c %08lx %08lx\n",
398 shmd->vm_start, shmd->vm_end,
399 shmd->vm_flags & VM_READ ? 'r' : '-',
400 shmd->vm_flags & VM_WRITE ? 'w' : '-',
401 shmd->vm_flags & VM_EXEC ? 'x' : '-',
402 shmd->vm_flags & VM_MAYSHARE ? 's' : 'p',
403 shmd->vm_offset, shmd->vm_pte);
404 }
405 shp->attaches = NULL;
406 } else {
407 if (shp->attaches == shmd)
408 shp->attaches = shmd->vm_next_share;
409 shmd->vm_prev_share->vm_next_share = shmd->vm_next_share;
410 shmd->vm_next_share->vm_prev_share = shmd->vm_prev_share;
411 }
412 }
413
414
415
416
417
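/* Build page tables for the new mapping and seed every pte with a
   non-present "shm swap entry" derived from shmd->vm_pte; the first touch of
   each page then faults into shm_swap_in(). */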
418 static int shm_map (struct vm_area_struct *shmd)
419 {
420 pgd_t *page_dir;
421 pmd_t *page_middle;
422 pte_t *page_table;
423 unsigned long tmp, shm_sgn;
424
425
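/* clear any old mappings in the target range */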
426 do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);
427
428
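/* account for the new range and insert the vma into the task's address space */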
429 current->mm->total_vm += (shmd->vm_end - shmd->vm_start) >> PAGE_SHIFT;
430 insert_vm_struct(current, shmd);
431 merge_segments(current, shmd->vm_start, shmd->vm_end);
432
433
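/* walk the range, allocating page-table levels as needed, and store the
   per-page shm swap entry in each pte */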
434 shm_sgn = shmd->vm_pte +
435 SWP_ENTRY(0, (shmd->vm_offset >> PAGE_SHIFT) << SHM_IDX_SHIFT);
436 for (tmp = shmd->vm_start;
437 tmp < shmd->vm_end;
438 tmp += PAGE_SIZE, shm_sgn += SWP_ENTRY(0, 1 << SHM_IDX_SHIFT))
439 {
440 page_dir = pgd_offset(shmd->vm_mm,tmp);
441 page_middle = pmd_alloc(page_dir,tmp);
442 if (!page_middle)
443 return -ENOMEM;
444 page_table = pte_alloc(page_middle,tmp);
445 if (!page_table)
446 return -ENOMEM;
447 set_pte(page_table, __pte(shm_sgn));
448 }
449 invalidate_range(shmd->vm_mm, shmd->vm_start, shmd->vm_end);
450 return 0;
451 }
452
453
454
455
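/* Attach a segment: validate the address and permissions, build a
   vm_area_struct describing the mapping, map it with shm_map(), and link it
   onto the segment's attach list. */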
456 asmlinkage int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
457 {
458 struct shmid_ds *shp;
459 struct vm_area_struct *shmd;
460 int err;
461 unsigned int id;
462 unsigned long addr;
463
464 if (shmid < 0) {
465
466 return -EINVAL;
467 }
468
469 shp = shm_segs[id = (unsigned int) shmid % SHMMNI];
470 if (shp == IPC_UNUSED || shp == IPC_NOID) {
471
472 return -EINVAL;
473 }
474
475 if (!(addr = (ulong) shmaddr)) {
476 if (shmflg & SHM_REMAP)
477 return -EINVAL;
478 if (!(addr = get_unmapped_area(0, shp->shm_segsz)))
479 return -ENOMEM;
480 } else if (addr & (SHMLBA-1)) {
481 if (shmflg & SHM_RND)
482 addr &= ~(SHMLBA-1);
483 else
484 return -EINVAL;
485 }
486
487
488
489
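/* refuse an address that would leave the stack less than a few pages of room
   to grow */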
490 if (addr < current->mm->start_stack &&
491 addr > current->mm->start_stack - PAGE_SIZE*(shp->shm_npages + 4))
492 {
493
494 return -EINVAL;
495 }
496 if (!(shmflg & SHM_REMAP))
497 if ((shmd = find_vma_intersection(current, addr, addr + shp->shm_segsz))) {
498
499
500 return -EINVAL;
501 }
502
503 if (ipcperms(&shp->shm_perm, shmflg & SHM_RDONLY ? S_IRUGO : S_IRUGO|S_IWUGO))
504 return -EACCES;
505 if (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)
506 return -EIDRM;
507
508 shmd = (struct vm_area_struct *) kmalloc (sizeof(*shmd), GFP_KERNEL);
509 if (!shmd)
510 return -ENOMEM;
511 if ((shp != shm_segs[id]) || (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)) {
512 kfree(shmd);
513 return -EIDRM;
514 }
515
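/* vm_pte is not a real pte: it is a swap entry of type SHM_SWP_TYPE whose
   offset carries the segment id.  It tags the mapping and is the base from
   which shm_map() and shm_swap() build the per-page entries. */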
516 shmd->vm_pte = SWP_ENTRY(SHM_SWP_TYPE, id);
517 shmd->vm_start = addr;
518 shmd->vm_end = addr + shp->shm_npages * PAGE_SIZE;
519 shmd->vm_mm = current->mm;
520 shmd->vm_page_prot = (shmflg & SHM_RDONLY) ? PAGE_READONLY : PAGE_SHARED;
521 shmd->vm_flags = VM_SHM | VM_MAYSHARE | VM_SHARED
522 | VM_MAYREAD | VM_MAYEXEC | VM_READ | VM_EXEC
523 | ((shmflg & SHM_RDONLY) ? 0 : VM_MAYWRITE | VM_WRITE);
524 shmd->vm_next_share = shmd->vm_prev_share = NULL;
525 shmd->vm_inode = NULL;
526 shmd->vm_offset = 0;
527 shmd->vm_ops = &shm_vm_ops;
528
529 shp->shm_nattch++;
530 if ((err = shm_map (shmd))) {
531 if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
532 killseg(id);
533 kfree(shmd);
534 return err;
535 }
536
537 insert_attach(shp,shmd);
538
539 shp->shm_lpid = current->pid;
540 shp->shm_atime = CURRENT_TIME;
541
542 *raddr = addr;
543 return 0;
544 }
545
546
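/* Called when a mapping of the segment is duplicated (e.g. across fork()):
   add the new vma to the attach list and count the extra attach. */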
547 static void shm_open (struct vm_area_struct *shmd)
548 {
549 unsigned int id;
550 struct shmid_ds *shp;
551
552 id = SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK;
553 shp = shm_segs[id];
554 if (shp == IPC_UNUSED) {
555 printk("shm_open: unused id=%d PANIC\n", id);
556 return;
557 }
558 insert_attach(shp,shmd);
559 shp->shm_nattch++;
560 shp->shm_atime = CURRENT_TIME;
561 shp->shm_lpid = current->pid;
562 }
563
564
565
566
567
568
569
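/* Called when a mapping goes away (munmap, exit, or a remap over it): unlink
   the vma from the attach list and, if this was the last attach of a segment
   already marked SHM_DEST, destroy the segment. */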
570 static void shm_close (struct vm_area_struct *shmd)
571 {
572 struct shmid_ds *shp;
573 int id;
574
575
576 id = SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK;
577 shp = shm_segs[id];
578 remove_attach(shp,shmd);
579 shp->shm_lpid = current->pid;
580 shp->shm_dtime = CURRENT_TIME;
581 if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
582 killseg (id);
583 }
584
585
586
587
588
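/* Detach: unmap every shm mapping whose start (adjusted by vm_offset) matches
   shmaddr; the per-segment bookkeeping happens in shm_close() via
   do_munmap(). */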
589 asmlinkage int sys_shmdt (char *shmaddr)
590 {
591 struct vm_area_struct *shmd, *shmdnext;
592
593 for (shmd = current->mm->mmap; shmd; shmd = shmdnext) {
594 shmdnext = shmd->vm_next;
595 if (shmd->vm_ops == &shm_vm_ops
596 && shmd->vm_start - shmd->vm_offset == (ulong) shmaddr)
597 do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);
598 }
599 return 0;
600 }
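/* For reference, a minimal user-space sketch of how the handlers above are
   normally exercised through the C library wrappers (which reach this file
   via the kernel's IPC system-call entry points); error checking omitted:

	#include <sys/ipc.h>
	#include <sys/shm.h>
	#include <string.h>

	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	char *p = (char *) shmat(id, NULL, 0);	// mapped by sys_shmat()
	strcpy(p, "hello");			// pages faulted in via shm_swap_in()
	shmdt(p);				// sys_shmdt() -> shm_close()
	shmctl(id, IPC_RMID, NULL);		// destroyed once the last attach is gone
*/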
601
602
603
604
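/* Installed as the vma swapin operation: the fault handler calls it when it
   hits one of the non-present SHM_SWP_TYPE entries.  The entry is
   cross-checked against the vma, then the page is taken from
   shp->shm_pages[idx], which is re-read after every operation that may
   sleep, since another faulting process can bring the page in first. */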
605 static pte_t shm_swap_in(struct vm_area_struct * shmd, unsigned long offset, unsigned long code)
606 {
607 pte_t pte;
608 struct shmid_ds *shp;
609 unsigned int id, idx;
610
611 id = SWP_OFFSET(code) & SHM_ID_MASK;
612 if (id != (SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK)) {
613 printk ("shm_swap_in: code id = %d and shmd id = %ld differ\n",
614 id, SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK);
615 return BAD_PAGE;
616 }
617 if (id > max_shmid) {
618 printk ("shm_swap_in: id=%d too big. proc mem corrupted\n", id);
619 return BAD_PAGE;
620 }
621 shp = shm_segs[id];
622 if (shp == IPC_UNUSED || shp == IPC_NOID) {
623 printk ("shm_swap_in: id=%d invalid. Race.\n", id);
624 return BAD_PAGE;
625 }
626 idx = (SWP_OFFSET(code) >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
627 if (idx != (offset >> PAGE_SHIFT)) {
628 printk ("shm_swap_in: code idx = %u and shmd idx = %lu differ\n",
629 idx, offset >> PAGE_SHIFT);
630 return BAD_PAGE;
631 }
632 if (idx >= shp->shm_npages) {
633 printk ("shm_swap_in: too large page index. id=%d\n", id);
634 return BAD_PAGE;
635 }
636
637 pte_val(pte) = shp->shm_pages[idx];
638 if (!pte_present(pte)) {
639 unsigned long page = get_free_page(GFP_KERNEL);
640 if (!page) {
641 oom(current);
642 return BAD_PAGE;
643 }
644 pte_val(pte) = shp->shm_pages[idx];
645 if (pte_present(pte)) {
646 free_page (page);
647 goto done;
648 }
649 if (!pte_none(pte)) {
650 read_swap_page(pte_val(pte), (char *) page);
651 pte_val(pte) = shp->shm_pages[idx];
652 if (pte_present(pte)) {
653 free_page (page);
654 goto done;
655 }
656 swap_free(pte_val(pte));
657 shm_swp--;
658 }
659 shm_rss++;
660 pte = pte_mkdirty(mk_pte(page, PAGE_SHARED));
661 shp->shm_pages[idx] = pte_val(pte);
662 } else
663 --current->maj_flt;
664
665 done:
666 current->min_flt++;
667 mem_map[MAP_NR(pte_page(pte))].count++;
668 return pte_modify(pte, shmd->vm_page_prot);
669 }
670
671
672
673
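/* Swap-out pass, called from the kernel's swap-out path: swap_id/swap_idx
   form a cursor that round-robins over segments and their pages.  Locked
   segments are skipped, recently referenced ptes are only aged with
   pte_mkold(), and a page is written to swap only after every attached
   mapping has given it up and its use count has dropped to one. */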
674 static unsigned long swap_id = 0;
675 static unsigned long swap_idx = 0;
676
677 int shm_swap (int prio, unsigned long limit)
678 {
679 pte_t page;
680 struct shmid_ds *shp;
681 struct vm_area_struct *shmd;
682 unsigned long swap_nr;
683 unsigned long id, idx;
684 int loop = 0;
685 int counter;
686
687 counter = shm_rss >> prio;
688 if (!counter || !(swap_nr = get_swap_page()))
689 return 0;
690
691 check_id:
692 shp = shm_segs[swap_id];
693 if (shp == IPC_UNUSED || shp == IPC_NOID || shp->shm_perm.mode & SHM_LOCKED ) {
694 next_id:
695 swap_idx = 0;
696 if (++swap_id > max_shmid) {
697 if (loop)
698 goto failed;
699 loop = 1;
700 swap_id = 0;
701 }
702 goto check_id;
703 }
704 id = swap_id;
705
706 check_table:
707 idx = swap_idx++;
708 if (idx >= shp->shm_npages)
709 goto next_id;
710
711 pte_val(page) = shp->shm_pages[idx];
712 if (!pte_present(page))
713 goto check_table;
714 if (pte_page(page) >= limit)
715 goto check_table;
716 swap_attempts++;
717
718 if (--counter < 0) {
719 failed:
720 swap_free (swap_nr);
721 return 0;
722 }
723 if (shp->attaches)
724 for (shmd = shp->attaches; ; ) {
725 do {
726 pgd_t *page_dir;
727 pmd_t *page_middle;
728 pte_t *page_table, pte;
729 unsigned long tmp;
730
731 if ((SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK) != id) {
732 printk ("shm_swap: id=%ld does not match shmd->vm_pte.id=%ld\n",
733 id, SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK);
734 continue;
735 }
736 tmp = shmd->vm_start + (idx << PAGE_SHIFT) - shmd->vm_offset;
737 if (!(tmp >= shmd->vm_start && tmp < shmd->vm_end))
738 continue;
739 page_dir = pgd_offset(shmd->vm_mm,tmp);
740 if (pgd_none(*page_dir) || pgd_bad(*page_dir)) {
741 printk("shm_swap: bad pgtbl! id=%ld start=%lx idx=%ld\n",
742 id, shmd->vm_start, idx);
743 pgd_clear(page_dir);
744 continue;
745 }
746 page_middle = pmd_offset(page_dir,tmp);
747 if (pmd_none(*page_middle) || pmd_bad(*page_middle)) {
748 printk("shm_swap: bad pgmid! id=%ld start=%lx idx=%ld\n",
749 id, shmd->vm_start, idx);
750 pmd_clear(page_middle);
751 continue;
752 }
753 page_table = pte_offset(page_middle,tmp);
754 pte = *page_table;
755 if (!pte_present(pte))
756 continue;
757 if (pte_young(pte)) {
758 set_pte(page_table, pte_mkold(pte));
759 continue;
760 }
761 if (pte_page(pte) != pte_page(page))
762 printk("shm_swap_out: page and pte mismatch\n");
763 set_pte(page_table,
764 __pte(shmd->vm_pte + SWP_ENTRY(0, idx << SHM_IDX_SHIFT)));
765 mem_map[MAP_NR(pte_page(pte))].count--;
766 if (shmd->vm_mm->rss > 0)
767 shmd->vm_mm->rss--;
768 invalidate_page(shmd, tmp);
769
770 } while (0);
771 if ((shmd = shmd->vm_next_share) == shp->attaches)
772 break;
773 }
774
775 if (mem_map[MAP_NR(pte_page(page))].count != 1)
776 goto check_table;
777 shp->shm_pages[idx] = swap_nr;
778 write_swap_page (swap_nr, (char *) pte_page(page));
779 free_page(pte_page(page));
780 swap_successes++;
781 shm_swp++;
782 shm_rss--;
783 return 1;
784 }