This source file includes the following definitions:
- shm_init
- findkey
- newseg
- sys_shmget
- killseg
- sys_shmctl
- insert_attach
- remove_attach
- shm_map
- sys_shmat
- shm_open
- shm_close
- sys_shmdt
- shm_swap_in
- shm_swap
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/ipc.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/pgtable.h>

extern int ipcperms (struct ipc_perm *ipcp, short shmflg);
extern unsigned long get_swap_page (void);
static int findkey (key_t key);
static int newseg (key_t key, int shmflg, int size);
static int shm_map (struct vm_area_struct *shmd);
static void killseg (int id);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
static pte_t shm_swap_in (struct vm_area_struct *, unsigned long, unsigned long);

static int shm_tot = 0;		/* total number of shared memory pages */
static int shm_rss = 0;		/* number of shared memory pages that are in memory */
static int shm_swp = 0;		/* number of shared memory pages that are in swap */
static int max_shmid = 0;	/* every used id is <= max_shmid */
static struct wait_queue *shm_lock = NULL;	/* calling findkey() may need to wait */
static struct shmid_ds *shm_segs[SHMMNI];

static unsigned short shm_seq = 0;	/* incremented, for recognizing stale ids */

/* some statistics */
static ulong swap_attempts = 0;
static ulong swap_successes = 0;
static ulong used_segs = 0;
void shm_init (void)
{
        int id;

        for (id = 0; id < SHMMNI; id++)
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
        shm_tot = shm_rss = shm_seq = max_shmid = used_segs = 0;
        shm_lock = NULL;
        return;
}

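/*
 * findkey() scans the segment table for a matching key, sleeping while
 * any slot is still being set up (IPC_NOID).  It returns the slot id on
 * a hit, or -1 if the key is not in use.
 */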
static int findkey (key_t key)
{
        int id;
        struct shmid_ds *shp;

        for (id = 0; id <= max_shmid; id++) {
                while ((shp = shm_segs[id]) == IPC_NOID)
                        sleep_on (&shm_lock);
                if (shp == IPC_UNUSED)
                        continue;
                if (key == shp->shm_perm.key)
                        return id;
        }
        return -1;
}

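/*
 * allocate new shmid_ds and pgtable. protected by shm_segs[id] = IPC_NOID,
 * which makes concurrent lookups sleep on shm_lock until the slot is filled.
 */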
static int newseg (key_t key, int shmflg, int size)
{
        struct shmid_ds *shp;
        int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int id, i;

        if (size < SHMMIN)
                return -EINVAL;
        if (shm_tot + numpages >= SHMALL)
                return -ENOSPC;
        for (id = 0; id < SHMMNI; id++)
                if (shm_segs[id] == IPC_UNUSED) {
                        shm_segs[id] = (struct shmid_ds *) IPC_NOID;
                        goto found;
                }
        return -ENOSPC;

found:
        shp = (struct shmid_ds *) kmalloc (sizeof (*shp), GFP_KERNEL);
        if (!shp) {
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
                if (shm_lock)
                        wake_up (&shm_lock);
                return -ENOMEM;
        }

        shp->shm_pages = (ulong *) kmalloc (numpages * sizeof(ulong), GFP_KERNEL);
        if (!shp->shm_pages) {
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
                if (shm_lock)
                        wake_up (&shm_lock);
                kfree(shp);
                return -ENOMEM;
        }

        for (i = 0; i < numpages; shp->shm_pages[i++] = 0);
        shm_tot += numpages;
        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->shm_perm.cuid = shp->shm_perm.uid = current->euid;
        shp->shm_perm.cgid = shp->shm_perm.gid = current->egid;
        shp->shm_perm.seq = shm_seq;
        shp->shm_segsz = size;
        shp->shm_cpid = current->pid;
        shp->attaches = NULL;
        shp->shm_lpid = shp->shm_nattch = 0;
        shp->shm_atime = shp->shm_dtime = 0;
        shp->shm_ctime = CURRENT_TIME;
        shp->shm_npages = numpages;

        if (id > max_shmid)
                max_shmid = id;
        shm_segs[id] = shp;
        used_segs++;
        if (shm_lock)
                wake_up (&shm_lock);
        return (unsigned int) shp->shm_perm.seq * SHMMNI + id;
}

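/*
 * sys_shmget() returns an identifier that encodes both the slot and the
 * segment's sequence number: seq * SHMMNI + id.  Identifiers that refer
 * to a since-reused slot can thereby be rejected as stale.
 */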
asmlinkage int sys_shmget (key_t key, int size, int shmflg)
{
        struct shmid_ds *shp;
        int id = 0;

        if (size < 0 || size > SHMMAX)
                return -EINVAL;
        if (key == IPC_PRIVATE)
                return newseg(key, shmflg, size);
        if ((id = findkey (key)) == -1) {
                if (!(shmflg & IPC_CREAT))
                        return -ENOENT;
                return newseg(key, shmflg, size);
        }
        if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL))
                return -EEXIST;
        shp = shm_segs[id];
        if (shp->shm_perm.mode & SHM_DEST)
                return -EIDRM;
        if (size > shp->shm_segsz)
                return -EINVAL;
        if (ipcperms (&shp->shm_perm, shmflg))
                return -EACCES;
        return (unsigned int) shp->shm_perm.seq * SHMMNI + id;
}

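/*
 * killseg() deallocates a segment: it bumps the sequence numbers so that
 * stale ids can be detected, frees every resident page and swap entry,
 * then releases the page map and the descriptor itself.
 */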
static void killseg (int id)
{
        struct shmid_ds *shp;
        int i, numpages;

        shp = shm_segs[id];
        if (shp == IPC_NOID || shp == IPC_UNUSED) {
                printk ("shm nono: killseg called on unused seg id=%d\n", id);
                return;
        }
        shp->shm_perm.seq++;	/* for shmat */
        shm_seq = (shm_seq + 1) % ((unsigned)(1 << 31) / SHMMNI);	/* increment, but avoid overflow */
        shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
        used_segs--;
        if (id == max_shmid)
                while (max_shmid && (shm_segs[--max_shmid] == IPC_UNUSED));
        if (!shp->shm_pages) {
                printk ("shm nono: killseg shp->pages=NULL. id=%d\n", id);
                return;
        }
        numpages = shp->shm_npages;
        for (i = 0; i < numpages; i++) {
                pte_t pte;
                pte_val(pte) = shp->shm_pages[i];
                if (pte_none(pte))
                        continue;
                if (pte_present(pte)) {
                        free_page (pte_page(pte));
                        shm_rss--;
                } else {
                        swap_free(pte_val(pte));
                        shm_swp--;
                }
        }
        kfree(shp->shm_pages);
        shm_tot -= numpages;
        kfree(shp);
        return;
}

asmlinkage int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
{
        struct shmid_ds tbuf;
        struct shmid_ds *shp;
        struct ipc_perm *ipcp;
        int id, err;

        if (cmd < 0 || shmid < 0)
                return -EINVAL;
        if (cmd == IPC_SET) {
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_READ, buf, sizeof (*buf));
                if (err)
                        return err;
                memcpy_fromfs (&tbuf, buf, sizeof (*buf));
        }

        switch (cmd) {
        case IPC_INFO:
        {
                struct shminfo shminfo;
                if (!buf)
                        return -EFAULT;
                shminfo.shmmni = SHMMNI;
                shminfo.shmmax = SHMMAX;
                shminfo.shmmin = SHMMIN;
                shminfo.shmall = SHMALL;
                shminfo.shmseg = SHMSEG;
                err = verify_area (VERIFY_WRITE, buf, sizeof (struct shminfo));
                if (err)
                        return err;
                memcpy_tofs (buf, &shminfo, sizeof(struct shminfo));
                return max_shmid;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (shm_info));
                if (err)
                        return err;
                shm_info.used_ids = used_segs;
                shm_info.shm_rss = shm_rss;
                shm_info.shm_tot = shm_tot;
                shm_info.shm_swp = shm_swp;
                shm_info.swap_attempts = swap_attempts;
                shm_info.swap_successes = swap_successes;
                memcpy_tofs (buf, &shm_info, sizeof(shm_info));
                return max_shmid;
        }
        case SHM_STAT:
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (*buf));
                if (err)
                        return err;
                if (shmid > max_shmid)
                        return -EINVAL;
                shp = shm_segs[shmid];
                if (shp == IPC_UNUSED || shp == IPC_NOID)
                        return -EINVAL;
                if (ipcperms (&shp->shm_perm, S_IRUGO))
                        return -EACCES;
                id = (unsigned int) shp->shm_perm.seq * SHMMNI + shmid;
                tbuf.shm_perm = shp->shm_perm;
                tbuf.shm_segsz = shp->shm_segsz;
                tbuf.shm_atime = shp->shm_atime;
                tbuf.shm_dtime = shp->shm_dtime;
                tbuf.shm_ctime = shp->shm_ctime;
                tbuf.shm_cpid = shp->shm_cpid;
                tbuf.shm_lpid = shp->shm_lpid;
                tbuf.shm_nattch = shp->shm_nattch;
                memcpy_tofs (buf, &tbuf, sizeof(*buf));
                return id;
        }

        shp = shm_segs[id = (unsigned int) shmid % SHMMNI];
        if (shp == IPC_UNUSED || shp == IPC_NOID)
                return -EINVAL;
        if (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)
                return -EIDRM;
        ipcp = &shp->shm_perm;

        switch (cmd) {
        case SHM_UNLOCK:
                if (!suser())
                        return -EPERM;
                if (!(ipcp->mode & SHM_LOCKED))
                        return -EINVAL;
                ipcp->mode &= ~SHM_LOCKED;
                break;
        case SHM_LOCK:
                /* Allow superuser to lock segment in memory */
                /* Should the pages be faulted in here or left to the user? */
                /* need to determine interaction with current->swappable */
                if (!suser())
                        return -EPERM;
                if (ipcp->mode & SHM_LOCKED)
                        return -EINVAL;
                ipcp->mode |= SHM_LOCKED;
                break;
        case IPC_STAT:
                if (ipcperms (ipcp, S_IRUGO))
                        return -EACCES;
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (*buf));
                if (err)
                        return err;
                tbuf.shm_perm = shp->shm_perm;
                tbuf.shm_segsz = shp->shm_segsz;
                tbuf.shm_atime = shp->shm_atime;
                tbuf.shm_dtime = shp->shm_dtime;
                tbuf.shm_ctime = shp->shm_ctime;
                tbuf.shm_cpid = shp->shm_cpid;
                tbuf.shm_lpid = shp->shm_lpid;
                tbuf.shm_nattch = shp->shm_nattch;
                memcpy_tofs (buf, &tbuf, sizeof(*buf));
                break;
        case IPC_SET:
                if (suser() || current->euid == shp->shm_perm.uid ||
                    current->euid == shp->shm_perm.cuid) {
                        ipcp->uid = tbuf.shm_perm.uid;
                        ipcp->gid = tbuf.shm_perm.gid;
                        ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
                                | (tbuf.shm_perm.mode & S_IRWXUGO);
                        shp->shm_ctime = CURRENT_TIME;
                        break;
                }
                return -EPERM;
        case IPC_RMID:
                if (suser() || current->euid == shp->shm_perm.uid ||
                    current->euid == shp->shm_perm.cuid) {
                        shp->shm_perm.mode |= SHM_DEST;
                        if (shp->shm_nattch <= 0)
                                killseg (id);
                        break;
                }
                return -EPERM;
        default:
                return -EINVAL;
        }
        return 0;
}

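/*
 * The per-process structure for managing an attach is a vm_area_struct:
 * shmat() adds one to the segment's attach list and shmdt() removes it.
 *	shmd->vm_mm		the attaching mm
 *	shmd->vm_start		virtual address of the attach, multiple of SHMLBA
 *	shmd->vm_end		multiple of SHMLBA
 *	shmd->vm_next_share	next attach for this segment
 *	shmd->vm_offset		offset into the segment
 *	shmd->vm_pte		signature for this attach
 */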
static struct vm_operations_struct shm_vm_ops = {
        shm_open,		/* open */
        shm_close,		/* close */
        NULL,			/* unmap */
        NULL,			/* protect */
        NULL,			/* sync */
        NULL,			/* advise */
        NULL,			/* nopage (done with swapin) */
        NULL,			/* wppage */
        NULL,			/* swapout */
        shm_swap_in		/* swapin */
};

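/* Insert shmd into the circular list shp->attaches */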
static inline void insert_attach (struct shmid_ds * shp, struct vm_area_struct * shmd)
{
        struct vm_area_struct * attaches;

        if ((attaches = shp->attaches)) {
                shmd->vm_next_share = attaches;
                shmd->vm_prev_share = attaches->vm_prev_share;
                shmd->vm_prev_share->vm_next_share = shmd;
                attaches->vm_prev_share = shmd;
        } else
                shp->attaches = shmd->vm_next_share = shmd->vm_prev_share = shmd;
}

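/* Remove shmd from the circular list shp->attaches */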
static inline void remove_attach (struct shmid_ds * shp, struct vm_area_struct * shmd)
{
        if (shmd->vm_next_share == shmd) {
                if (shp->attaches != shmd) {
                        printk("shm_close: shm segment (id=%ld) attach list inconsistent\n",
                               SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK);
                        printk("shm_close: %08lx-%08lx %c%c%c%c %08lx %08lx\n",
                               shmd->vm_start, shmd->vm_end,
                               shmd->vm_flags & VM_READ ? 'r' : '-',
                               shmd->vm_flags & VM_WRITE ? 'w' : '-',
                               shmd->vm_flags & VM_EXEC ? 'x' : '-',
                               shmd->vm_flags & VM_MAYSHARE ? 's' : 'p',
                               shmd->vm_offset, shmd->vm_pte);
                }
                shp->attaches = NULL;
        } else {
                if (shp->attaches == shmd)
                        shp->attaches = shmd->vm_next_share;
                shmd->vm_prev_share->vm_next_share = shmd->vm_next_share;
                shmd->vm_next_share->vm_prev_share = shmd->vm_prev_share;
        }
}

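/*
 * ensure page tables exist
 * mark page table entries with shm_sgn.
 */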
static int shm_map (struct vm_area_struct *shmd)
{
        pgd_t *page_dir;
        pmd_t *page_middle;
        pte_t *page_table;
        unsigned long tmp, shm_sgn;

        /* clear old mappings */
        do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);

        /* add new mapping */
        insert_vm_struct(current, shmd);
        merge_segments(current, shmd->vm_start, shmd->vm_end);

        /* map page range */
        shm_sgn = shmd->vm_pte +
                SWP_ENTRY(0, (shmd->vm_offset >> PAGE_SHIFT) << SHM_IDX_SHIFT);
        for (tmp = shmd->vm_start;
             tmp < shmd->vm_end;
             tmp += PAGE_SIZE, shm_sgn += SWP_ENTRY(0, 1 << SHM_IDX_SHIFT))
        {
                page_dir = pgd_offset(shmd->vm_mm, tmp);
                page_middle = pmd_alloc(page_dir, tmp);
                if (!page_middle)
                        return -ENOMEM;
                page_table = pte_alloc(page_middle, tmp);
                if (!page_table)
                        return -ENOMEM;
                set_pte(page_table, __pte(shm_sgn));
        }
        invalidate();
        return 0;
}

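/*
 * Fix shmaddr, allocate descriptor and insert attach descriptor into
 * the segment's attach list; the actual mapping is done by shm_map().
 */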
asmlinkage int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
{
        struct shmid_ds *shp;
        struct vm_area_struct *shmd;
        int err;
        unsigned int id;
        unsigned long addr;

        if (shmid < 0)
                return -EINVAL;

        shp = shm_segs[id = (unsigned int) shmid % SHMMNI];
        if (shp == IPC_UNUSED || shp == IPC_NOID)
                return -EINVAL;

        if (!(addr = (ulong) shmaddr)) {
                if (shmflg & SHM_REMAP)
                        return -EINVAL;
                if (!(addr = get_unmapped_area(0, shp->shm_segsz)))
                        return -ENOMEM;
        } else if (addr & (SHMLBA-1)) {
                if (shmflg & SHM_RND)
                        addr &= ~(SHMLBA-1);	/* round down */
                else
                        return -EINVAL;
        }
        /*
         * If the segment would go below the stack, make sure there is
         * some space left for the stack to grow (presently 4 pages).
         */
        if (addr < current->mm->start_stack &&
            addr > current->mm->start_stack - PAGE_SIZE * (shp->shm_npages + 4))
                return -EINVAL;
        if (!(shmflg & SHM_REMAP))
                if ((shmd = find_vma_intersection(current, addr, addr + shp->shm_segsz)))
                        return -EINVAL;

        if (ipcperms(&shp->shm_perm, shmflg & SHM_RDONLY ? S_IRUGO : S_IRUGO|S_IWUGO))
                return -EACCES;
        if (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)
                return -EIDRM;

        shmd = (struct vm_area_struct *) kmalloc (sizeof(*shmd), GFP_KERNEL);
        if (!shmd)
                return -ENOMEM;
        if ((shp != shm_segs[id]) || (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)) {
                /* the segment was killed or replaced while we slept in kmalloc() */
                kfree(shmd);
                return -EIDRM;
        }

        shmd->vm_pte = SWP_ENTRY(SHM_SWP_TYPE, id);
        shmd->vm_start = addr;
        shmd->vm_end = addr + shp->shm_npages * PAGE_SIZE;
        shmd->vm_mm = current->mm;
        shmd->vm_page_prot = (shmflg & SHM_RDONLY) ? PAGE_READONLY : PAGE_SHARED;
        shmd->vm_flags = VM_SHM | VM_MAYSHARE | VM_SHARED
                         | VM_MAYREAD | VM_MAYEXEC | VM_READ | VM_EXEC
                         | ((shmflg & SHM_RDONLY) ? 0 : VM_MAYWRITE | VM_WRITE);
        shmd->vm_next_share = shmd->vm_prev_share = NULL;
        shmd->vm_inode = NULL;
        shmd->vm_offset = 0;
        shmd->vm_ops = &shm_vm_ops;

        shp->shm_nattch++;	/* prevent destruction */
        if ((err = shm_map (shmd))) {
                if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
                        killseg(id);
                kfree(shmd);
                return err;
        }

        insert_attach(shp, shmd);

        shp->shm_lpid = current->pid;
        shp->shm_atime = CURRENT_TIME;

        *raddr = addr;
        return 0;
}

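/* This is called by fork, once for every shm attach. */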
static void shm_open (struct vm_area_struct *shmd)
{
        unsigned int id;
        struct shmid_ds *shp;

        id = SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK;
        shp = shm_segs[id];
        if (shp == IPC_UNUSED) {
                printk("shm_open: unused id=%d PANIC\n", id);
                return;
        }
        insert_attach(shp, shmd);
        shp->shm_nattch++;
        shp->shm_atime = CURRENT_TIME;
        shp->shm_lpid = current->pid;
}

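/*
 * remove the attach descriptor shmd.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */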
static void shm_close (struct vm_area_struct *shmd)
{
        struct shmid_ds *shp;
        int id;

        unmap_page_range (shmd->vm_start, shmd->vm_end - shmd->vm_start);

        /* remove from the list of attaches of the shm segment */
        id = SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK;
        shp = shm_segs[id];
        remove_attach(shp, shmd);
        shp->shm_lpid = current->pid;
        shp->shm_dtime = CURRENT_TIME;
        if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
                killseg (id);
}

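/*
 * detach and kill segment if marked destroyed.
 * The work is done by shm_close.
 */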
asmlinkage int sys_shmdt (char *shmaddr)
{
        struct vm_area_struct *shmd, *shmdnext;

        for (shmd = current->mm->mmap; shmd; shmd = shmdnext) {
                shmdnext = shmd->vm_next;
                if (shmd->vm_ops == &shm_vm_ops
                    && shmd->vm_start - shmd->vm_offset == (ulong) shmaddr)
                        do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);
        }
        return 0;
}

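/*
 * page not present in the shm segment: either it was swapped out or it
 * has never been allocated.  Bring it in (allocating a fresh zeroed page
 * if needed) and return a pte for the faulting attach.
 */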
static pte_t shm_swap_in(struct vm_area_struct * shmd, unsigned long offset, unsigned long code)
{
        pte_t pte;
        struct shmid_ds *shp;
        unsigned int id, idx;

        id = SWP_OFFSET(code) & SHM_ID_MASK;
        if (id != (SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK)) {
                printk ("shm_swap_in: code id = %d and shmd id = %ld differ\n",
                        id, SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK);
                return BAD_PAGE;
        }
        if (id > max_shmid) {
                printk ("shm_swap_in: id=%d too big. proc mem corrupted\n", id);
                return BAD_PAGE;
        }
        shp = shm_segs[id];
        if (shp == IPC_UNUSED || shp == IPC_NOID) {
                printk ("shm_swap_in: id=%d invalid. Race.\n", id);
                return BAD_PAGE;
        }
        idx = (SWP_OFFSET(code) >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
        if (idx != (offset >> PAGE_SHIFT)) {
                printk ("shm_swap_in: code idx = %u and shmd idx = %lu differ\n",
                        idx, offset >> PAGE_SHIFT);
                return BAD_PAGE;
        }
        if (idx >= shp->shm_npages) {
                printk ("shm_swap_in : too large page index. id=%d\n", id);
                return BAD_PAGE;
        }

        pte_val(pte) = shp->shm_pages[idx];
        if (!pte_present(pte)) {
                unsigned long page = get_free_page(GFP_KERNEL);
                if (!page) {
                        oom(current);
                        return BAD_PAGE;
                }
                /* get_free_page() may sleep: recheck whether another
                   faulting process brought the page in meanwhile */
                pte_val(pte) = shp->shm_pages[idx];
                if (pte_present(pte)) {
                        free_page (page);
                        goto done;
                }
                if (!pte_none(pte)) {
                        read_swap_page(pte_val(pte), (char *) page);
                        /* read_swap_page() sleeps too: recheck once more */
                        pte_val(pte) = shp->shm_pages[idx];
                        if (pte_present(pte)) {
                                free_page (page);
                                goto done;
                        }
                        swap_free(pte_val(pte));
                        shm_swp--;
                }
                shm_rss++;
                pte = pte_mkdirty(mk_pte(page, PAGE_SHARED));
                shp->shm_pages[idx] = pte_val(pte);
        } else
                --current->maj_flt;	/* was incremented in do_no_page */

done:	/* pte_val(pte) == shp->shm_pages[idx] */
        current->min_flt++;
        mem_map[MAP_NR(pte_page(pte))]++;
        return pte_modify(pte, shmd->vm_page_prot);
}

static unsigned long swap_id = 0;	/* currently being swapped */
static unsigned long swap_idx = 0;	/* next to swap */

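/*
 * Examine up to (shm_rss >> prio) resident shm pages, looking for one
 * that is unreferenced in every attach; such a page is unmapped
 * everywhere and written out to a freshly allocated swap entry.
 * Returns 1 if a page was swapped out, 0 otherwise.
 */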
int shm_swap (int prio, unsigned long limit)
{
        pte_t page;
        struct shmid_ds *shp;
        struct vm_area_struct *shmd;
        unsigned long swap_nr;
        unsigned long id, idx;
        int loop = 0, invalid = 0;
        int counter;

        counter = shm_rss >> prio;
        if (!counter || !(swap_nr = get_swap_page()))
                return 0;

check_id:
        shp = shm_segs[swap_id];
        if (shp == IPC_UNUSED || shp == IPC_NOID || shp->shm_perm.mode & SHM_LOCKED) {
next_id:
                swap_idx = 0;
                if (++swap_id > max_shmid) {
                        if (loop)
                                goto failed;
                        loop = 1;
                        swap_id = 0;
                }
                goto check_id;
        }
        id = swap_id;

check_table:
        idx = swap_idx++;
        if (idx >= shp->shm_npages)
                goto next_id;

        pte_val(page) = shp->shm_pages[idx];
        if (!pte_present(page))
                goto check_table;
        if (pte_page(page) >= limit)
                goto check_table;
        swap_attempts++;

        if (--counter < 0) {
failed:
                if (invalid)
                        invalidate();
                swap_free (swap_nr);
                return 0;
        }
        if (shp->attaches)
                for (shmd = shp->attaches; ; ) {
                        do {
                                /* a `continue' inside this do-while(0) skips
                                   ahead to the next attach */
                                pgd_t *page_dir;
                                pmd_t *page_middle;
                                pte_t *page_table, pte;
                                unsigned long tmp;

                                if ((SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK) != id) {
                                        printk ("shm_swap: id=%ld does not match shmd->vm_pte.id=%ld\n",
                                                id, SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK);
                                        continue;
                                }
                                tmp = shmd->vm_start + (idx << PAGE_SHIFT) - shmd->vm_offset;
                                if (!(tmp >= shmd->vm_start && tmp < shmd->vm_end))
                                        continue;
                                page_dir = pgd_offset(shmd->vm_mm, tmp);
                                if (pgd_none(*page_dir) || pgd_bad(*page_dir)) {
                                        printk("shm_swap: bad pgtbl! id=%ld start=%lx idx=%ld\n",
                                               id, shmd->vm_start, idx);
                                        pgd_clear(page_dir);
                                        continue;
                                }
                                page_middle = pmd_offset(page_dir, tmp);
                                if (pmd_none(*page_middle) || pmd_bad(*page_middle)) {
                                        printk("shm_swap: bad pgmid! id=%ld start=%lx idx=%ld\n",
                                               id, shmd->vm_start, idx);
                                        pmd_clear(page_middle);
                                        continue;
                                }
                                page_table = pte_offset(page_middle, tmp);
                                pte = *page_table;
                                if (!pte_present(pte))
                                        continue;
                                if (pte_young(pte)) {
                                        set_pte(page_table, pte_mkold(pte));
                                        continue;
                                }
                                if (pte_page(pte) != pte_page(page))
                                        printk("shm_swap_out: page and pte mismatch\n");
                                set_pte(page_table,
                                        __pte(shmd->vm_pte + SWP_ENTRY(0, idx << SHM_IDX_SHIFT)));
                                mem_map[MAP_NR(pte_page(pte))]--;
                                if (shmd->vm_mm->rss > 0)
                                        shmd->vm_mm->rss--;
                                invalid++;
                        } while (0);
                        if ((shmd = shmd->vm_next_share) == shp->attaches)
                                break;
                }

        if (mem_map[MAP_NR(pte_page(page))] != 1)
                goto check_table;
        shp->shm_pages[idx] = swap_nr;
        if (invalid)
                invalidate();
        write_swap_page (swap_nr, (char *) pte_page(page));
        free_page(pte_page(page));
        swap_successes++;
        shm_swp++;
        shm_rss--;
        return 1;
}