This source file includes the following definitions:
- shm_init
- findkey
- newseg
- sys_shmget
- killseg
- sys_shmctl
- insert_attach
- remove_attach
- shm_map
- sys_shmat
- shm_open
- shm_close
- sys_shmdt
- shm_swap_in
- shm_swap
/*
 * linux/ipc/shm.c
 * System V shared memory: shmget, shmat, shmdt and shmctl,
 * plus swap-out/swap-in support for shm pages.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/ipc.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/malloc.h>
#include <linux/swap.h>

#include <asm/segment.h>
#include <asm/pgtable.h>

extern int ipcperms (struct ipc_perm *ipcp, short shmflg);
extern unsigned long get_swap_page (void);
static int findkey (key_t key);
static int newseg (key_t key, int shmflg, int size);
static int shm_map (struct vm_area_struct *shmd);
static void killseg (int id);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
static pte_t shm_swap_in (struct vm_area_struct *, unsigned long, unsigned long);

static int shm_tot = 0; /* total number of shared memory pages */
static int shm_rss = 0; /* number of shared memory pages that are in memory */
static int shm_swp = 0; /* number of shared memory pages that are in swap */
static int max_shmid = 0; /* every used id is <= max_shmid */
static struct wait_queue *shm_lock = NULL; /* calling findkey() may need to wait */
static struct shmid_ds *shm_segs[SHMMNI];

static unsigned short shm_seq = 0; /* incremented, for recognizing stale ids */

/* some statistics */
static ulong swap_attempts = 0;
static ulong swap_successes = 0;
static ulong used_segs = 0;

void shm_init (void)
{
        int id;

        for (id = 0; id < SHMMNI; id++)
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
        shm_tot = shm_rss = shm_seq = max_shmid = used_segs = 0;
        shm_lock = NULL;
        return;
}

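/*
 * findkey() maps a key to a slot index, or -1 if no segment matches.
 * A slot containing IPC_NOID is in the middle of being set up by
 * newseg(), so we sleep on shm_lock until that allocation finishes.
 */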
static int findkey (key_t key)
{
        int id;
        struct shmid_ds *shp;

        for (id = 0; id <= max_shmid; id++) {
                while ((shp = shm_segs[id]) == IPC_NOID)
                        sleep_on (&shm_lock);
                if (shp == IPC_UNUSED)
                        continue;
                if (key == shp->shm_perm.key)
                        return id;
        }
        return -1;
}

/*
 * allocate new shmid_ds and pgtable. protected by shm_segs[id] = NOID.
 */
static int newseg (key_t key, int shmflg, int size)
{
        struct shmid_ds *shp;
        int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int id, i;

        if (size < SHMMIN)
                return -EINVAL;
        if (shm_tot + numpages >= SHMALL)
                return -ENOSPC;
        for (id = 0; id < SHMMNI; id++)
                if (shm_segs[id] == IPC_UNUSED) {
                        shm_segs[id] = (struct shmid_ds *) IPC_NOID;
                        goto found;
                }
        return -ENOSPC;

found:
        shp = (struct shmid_ds *) kmalloc (sizeof (*shp), GFP_KERNEL);
        if (!shp) {
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
                if (shm_lock)
                        wake_up (&shm_lock);
                return -ENOMEM;
        }

        shp->shm_pages = (ulong *) kmalloc (numpages*sizeof(ulong), GFP_KERNEL);
        if (!shp->shm_pages) {
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
                if (shm_lock)
                        wake_up (&shm_lock);
                kfree(shp);
                return -ENOMEM;
        }

        for (i = 0; i < numpages; shp->shm_pages[i++] = 0);
        shm_tot += numpages;
        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->shm_perm.cuid = shp->shm_perm.uid = current->euid;
        shp->shm_perm.cgid = shp->shm_perm.gid = current->egid;
        shp->shm_perm.seq = shm_seq;
        shp->shm_segsz = size;
        shp->shm_cpid = current->pid;
        shp->attaches = NULL;
        shp->shm_lpid = shp->shm_nattch = 0;
        shp->shm_atime = shp->shm_dtime = 0;
        shp->shm_ctime = CURRENT_TIME;
        shp->shm_npages = numpages;

        if (id > max_shmid)
                max_shmid = id;
        shm_segs[id] = shp;
        used_segs++;
        if (shm_lock)
                wake_up (&shm_lock);
        return (unsigned int) shp->shm_perm.seq * SHMMNI + id;
}
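/*
 * The value returned above is the shm id: seq * SHMMNI + slot.  The
 * slot is recovered later with id % SHMMNI and the sequence number
 * with id / SHMMNI; killseg() increments the per-slot sequence number,
 * so an id that outlives its segment no longer matches and is refused
 * with -EIDRM.
 */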

asmlinkage int sys_shmget (key_t key, int size, int shmflg)
{
        struct shmid_ds *shp;
        int id = 0;

        if (size < 0 || size > SHMMAX)
                return -EINVAL;
        if (key == IPC_PRIVATE)
                return newseg(key, shmflg, size);
        if ((id = findkey (key)) == -1) {
                if (!(shmflg & IPC_CREAT))
                        return -ENOENT;
                return newseg(key, shmflg, size);
        }
        if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL))
                return -EEXIST;
        shp = shm_segs[id];
        if (shp->shm_perm.mode & SHM_DEST)
                return -EIDRM;
        if (size > shp->shm_segsz)
                return -EINVAL;
        if (ipcperms (&shp->shm_perm, shmflg))
                return -EACCES;
        return (unsigned int) shp->shm_perm.seq * SHMMNI + id;
}
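/*
 * Note that IPC_PRIVATE is never matched against existing keys: it
 * goes straight to newseg(), so each IPC_PRIVATE request creates a
 * fresh segment and the returned id is the only handle to it.
 */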

/*
 * Only called after testing nattch and SHM_DEST.
 * Here pages, pgtable and shmid_ds are freed.
 */
static void killseg (int id)
{
        struct shmid_ds *shp;
        int i, numpages;

        shp = shm_segs[id];
        if (shp == IPC_NOID || shp == IPC_UNUSED) {
                printk ("shm nono: killseg called on unused seg id=%d\n", id);
                return;
        }
        shp->shm_perm.seq++;    /* for shmat */
        shm_seq = (shm_seq+1) % ((unsigned)(1<<31)/SHMMNI); /* increment, but avoid overflow */
        shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
        used_segs--;
        if (id == max_shmid)
                while (max_shmid && (shm_segs[--max_shmid] == IPC_UNUSED));
        if (!shp->shm_pages) {
                printk ("shm nono: killseg shp->pages=NULL. id=%d\n", id);
                return;
        }
        numpages = shp->shm_npages;
        for (i = 0; i < numpages; i++) {
                pte_t pte;
                pte_val(pte) = shp->shm_pages[i];
                if (pte_none(pte))
                        continue;
                if (pte_present(pte)) {
                        free_page (pte_page(pte));
                        shm_rss--;
                } else {
                        swap_free(pte_val(pte));
                        shm_swp--;
                }
        }
        kfree(shp->shm_pages);
        shm_tot -= numpages;
        kfree(shp);
        return;
}

asmlinkage int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
{
        struct shmid_ds tbuf;
        struct shmid_ds *shp;
        struct ipc_perm *ipcp;
        int id, err;

        if (cmd < 0 || shmid < 0)
                return -EINVAL;
        if (cmd == IPC_SET) {
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_READ, buf, sizeof (*buf));
                if (err)
                        return err;
                memcpy_fromfs (&tbuf, buf, sizeof (*buf));
        }

        switch (cmd) {
        case IPC_INFO:
        {
                struct shminfo shminfo;
                if (!buf)
                        return -EFAULT;
                shminfo.shmmni = SHMMNI;
                shminfo.shmmax = SHMMAX;
                shminfo.shmmin = SHMMIN;
                shminfo.shmall = SHMALL;
                shminfo.shmseg = SHMSEG;
                err = verify_area (VERIFY_WRITE, buf, sizeof (struct shminfo));
                if (err)
                        return err;
                memcpy_tofs (buf, &shminfo, sizeof(struct shminfo));
                return max_shmid;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (shm_info));
                if (err)
                        return err;
                shm_info.used_ids = used_segs;
                shm_info.shm_rss = shm_rss;
                shm_info.shm_tot = shm_tot;
                shm_info.shm_swp = shm_swp;
                shm_info.swap_attempts = swap_attempts;
                shm_info.swap_successes = swap_successes;
                memcpy_tofs (buf, &shm_info, sizeof(shm_info));
                return max_shmid;
        }
        case SHM_STAT:
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (*buf));
                if (err)
                        return err;
                if (shmid > max_shmid)
                        return -EINVAL;
                shp = shm_segs[shmid];
                if (shp == IPC_UNUSED || shp == IPC_NOID)
                        return -EINVAL;
                if (ipcperms (&shp->shm_perm, S_IRUGO))
                        return -EACCES;
                id = (unsigned int) shp->shm_perm.seq * SHMMNI + shmid;
                tbuf.shm_perm = shp->shm_perm;
                tbuf.shm_segsz = shp->shm_segsz;
                tbuf.shm_atime = shp->shm_atime;
                tbuf.shm_dtime = shp->shm_dtime;
                tbuf.shm_ctime = shp->shm_ctime;
                tbuf.shm_cpid = shp->shm_cpid;
                tbuf.shm_lpid = shp->shm_lpid;
                tbuf.shm_nattch = shp->shm_nattch;
                memcpy_tofs (buf, &tbuf, sizeof(*buf));
                return id;
        }

        shp = shm_segs[id = (unsigned int) shmid % SHMMNI];
        if (shp == IPC_UNUSED || shp == IPC_NOID)
                return -EINVAL;
        if (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)
                return -EIDRM;
        ipcp = &shp->shm_perm;

        switch (cmd) {
        case SHM_UNLOCK:
                if (!suser())
                        return -EPERM;
                if (!(ipcp->mode & SHM_LOCKED))
                        return -EINVAL;
                ipcp->mode &= ~SHM_LOCKED;
                break;
        case SHM_LOCK:
                /* Allow superuser to lock segment in memory; the pages
                   are not faulted in here. */
                if (!suser())
                        return -EPERM;
                if (ipcp->mode & SHM_LOCKED)
                        return -EINVAL;
                ipcp->mode |= SHM_LOCKED;
                break;
        case IPC_STAT:
                if (ipcperms (ipcp, S_IRUGO))
                        return -EACCES;
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (*buf));
                if (err)
                        return err;
                tbuf.shm_perm = shp->shm_perm;
                tbuf.shm_segsz = shp->shm_segsz;
                tbuf.shm_atime = shp->shm_atime;
                tbuf.shm_dtime = shp->shm_dtime;
                tbuf.shm_ctime = shp->shm_ctime;
                tbuf.shm_cpid = shp->shm_cpid;
                tbuf.shm_lpid = shp->shm_lpid;
                tbuf.shm_nattch = shp->shm_nattch;
                memcpy_tofs (buf, &tbuf, sizeof(*buf));
                break;
        case IPC_SET:
                if (suser() || current->euid == shp->shm_perm.uid ||
                    current->euid == shp->shm_perm.cuid) {
                        ipcp->uid = tbuf.shm_perm.uid;
                        ipcp->gid = tbuf.shm_perm.gid;
                        ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
                                | (tbuf.shm_perm.mode & S_IRWXUGO);
                        shp->shm_ctime = CURRENT_TIME;
                        break;
                }
                return -EPERM;
        case IPC_RMID:
                if (suser() || current->euid == shp->shm_perm.uid ||
                    current->euid == shp->shm_perm.cuid) {
                        shp->shm_perm.mode |= SHM_DEST;
                        if (shp->shm_nattch <= 0)
                                killseg (id);
                        break;
                }
                return -EPERM;
        default:
                return -EINVAL;
        }
        return 0;
}
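/*
 * sys_shmctl() above is split in two: IPC_INFO, SHM_INFO and SHM_STAT
 * operate on an index and need no valid segment id, so they are
 * handled before shmid is decoded; all remaining commands first map
 * shmid back to a slot (shmid % SHMMNI) and verify the sequence number
 * (shmid / SHMMNI), so that a stale id yields -EIDRM.
 */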

/*
 * The per process internal structure for managing segments is
 * `struct vm_area_struct'.
 * A shmat will add to and shmdt will remove from the list.
 * shmd->vm_mm          the attacher
 * shmd->vm_start       virt addr of attach, multiple of SHMLBA
 * shmd->vm_end         multiple of SHMLBA
 * shmd->vm_next_share  next attach for segment
 * shmd->vm_offset      offset into segment
 * shmd->vm_pte         signature for this attach
 */

static struct vm_operations_struct shm_vm_ops = {
        shm_open,               /* open */
        shm_close,              /* close */
        NULL,                   /* unmap */
        NULL,                   /* protect */
        NULL,                   /* sync */
        NULL,                   /* advise */
        NULL,                   /* nopage (done in shm_swap_in) */
        NULL,                   /* wppage */
        NULL,                   /* swapout (done in shm_swap) */
        shm_swap_in             /* swapin */
};

/* insert shmd into the circular list shp->attaches */
static inline void insert_attach (struct shmid_ds * shp, struct vm_area_struct * shmd)
{
        struct vm_area_struct * attaches;

        if ((attaches = shp->attaches)) {
                shmd->vm_next_share = attaches;
                shmd->vm_prev_share = attaches->vm_prev_share;
                shmd->vm_prev_share->vm_next_share = shmd;
                attaches->vm_prev_share = shmd;
        } else
                shp->attaches = shmd->vm_next_share = shmd->vm_prev_share = shmd;
}

/* remove shmd from the circular list shp->attaches */
static inline void remove_attach (struct shmid_ds * shp, struct vm_area_struct * shmd)
{
        if (shmd->vm_next_share == shmd) {
                if (shp->attaches != shmd) {
                        printk("shm_close: shm segment (id=%ld) attach list inconsistent\n",
                               SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK);
                        printk("shm_close: %08lx-%08lx %c%c%c%c %08lx %08lx\n",
                               shmd->vm_start, shmd->vm_end,
                               shmd->vm_flags & VM_READ ? 'r' : '-',
                               shmd->vm_flags & VM_WRITE ? 'w' : '-',
                               shmd->vm_flags & VM_EXEC ? 'x' : '-',
                               shmd->vm_flags & VM_MAYSHARE ? 's' : 'p',
                               shmd->vm_offset, shmd->vm_pte);
                }
                shp->attaches = NULL;
        } else {
                if (shp->attaches == shmd)
                        shp->attaches = shmd->vm_next_share;
                shmd->vm_prev_share->vm_next_share = shmd->vm_next_share;
                shmd->vm_next_share->vm_prev_share = shmd->vm_prev_share;
        }
}

/*
 * ensure page tables exist
 * mark page table entries with shm_sgn.
 */
static int shm_map (struct vm_area_struct *shmd)
{
        pgd_t *page_dir;
        pmd_t *page_middle;
        pte_t *page_table;
        unsigned long tmp, shm_sgn;
        int error;

        /* clear old mappings */
        do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);

        /* add new mapping */
        current->mm->total_vm += (shmd->vm_end - shmd->vm_start) >> PAGE_SHIFT;
        insert_vm_struct(current, shmd);
        merge_segments(current, shmd->vm_start, shmd->vm_end);

        /* map page range */
        error = 0;
        shm_sgn = shmd->vm_pte +
          SWP_ENTRY(0, (shmd->vm_offset >> PAGE_SHIFT) << SHM_IDX_SHIFT);
        flush_cache_range(shmd->vm_mm, shmd->vm_start, shmd->vm_end);
        for (tmp = shmd->vm_start;
             tmp < shmd->vm_end;
             tmp += PAGE_SIZE, shm_sgn += SWP_ENTRY(0, 1 << SHM_IDX_SHIFT))
        {
                page_dir = pgd_offset(shmd->vm_mm, tmp);
                page_middle = pmd_alloc(page_dir, tmp);
                if (!page_middle) {
                        error = -ENOMEM;
                        break;
                }
                page_table = pte_alloc(page_middle, tmp);
                if (!page_table) {
                        error = -ENOMEM;
                        break;
                }
                set_pte(page_table, __pte(shm_sgn));
        }
        flush_tlb_range(shmd->vm_mm, shmd->vm_start, shmd->vm_end);
        return error;
}
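/*
 * The "signature" ptes written by shm_map() above are non-present
 * entries carrying SHM_SWP_TYPE as the swap type and the pair
 * (page index << SHM_IDX_SHIFT | id) as the swap offset.  The first
 * access to such a page faults as if the page were swapped out, and
 * shm_swap_in() decodes the id and index back out of the entry.
 */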

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor
 */
asmlinkage int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
{
        struct shmid_ds *shp;
        struct vm_area_struct *shmd;
        int err;
        unsigned int id;
        unsigned long addr;

        if (shmid < 0) {
                /* shmid must be non-negative */
                return -EINVAL;
        }

        shp = shm_segs[id = (unsigned int) shmid % SHMMNI];
        if (shp == IPC_UNUSED || shp == IPC_NOID) {
                /* no segment in this slot */
                return -EINVAL;
        }

        if (!(addr = (ulong) shmaddr)) {
                if (shmflg & SHM_REMAP)
                        return -EINVAL;
                if (!(addr = get_unmapped_area(0, shp->shm_segsz)))
                        return -ENOMEM;
        } else if (addr & (SHMLBA-1)) {
                if (shmflg & SHM_RND)
                        addr &= ~(SHMLBA-1);    /* round down */
                else
                        return -EINVAL;
        }

        /*
         * If shm segment goes below stack, make sure there is some
         * space left for the stack to grow (presently 4 pages).
         */
        if (addr < current->mm->start_stack &&
            addr > current->mm->start_stack - PAGE_SIZE*(shp->shm_npages + 4))
        {
                /* segment would intersect the stack */
                return -EINVAL;
        }
        if (!(shmflg & SHM_REMAP))
                if ((shmd = find_vma_intersection(current, addr, addr + shp->shm_segsz))) {
                        /* the address range is already mapped */
                        return -EINVAL;
                }

        if (ipcperms(&shp->shm_perm, shmflg & SHM_RDONLY ? S_IRUGO : S_IRUGO|S_IWUGO))
                return -EACCES;
        if (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)
                return -EIDRM;

        shmd = (struct vm_area_struct *) kmalloc (sizeof(*shmd), GFP_KERNEL);
        if (!shmd)
                return -ENOMEM;
        if ((shp != shm_segs[id]) || (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)) {
                /* kmalloc() may have slept; re-check that the segment still exists */
                kfree(shmd);
                return -EIDRM;
        }

        shmd->vm_pte = SWP_ENTRY(SHM_SWP_TYPE, id);
        shmd->vm_start = addr;
        shmd->vm_end = addr + shp->shm_npages * PAGE_SIZE;
        shmd->vm_mm = current->mm;
        shmd->vm_page_prot = (shmflg & SHM_RDONLY) ? PAGE_READONLY : PAGE_SHARED;
        shmd->vm_flags = VM_SHM | VM_MAYSHARE | VM_SHARED
                         | VM_MAYREAD | VM_MAYEXEC | VM_READ | VM_EXEC
                         | ((shmflg & SHM_RDONLY) ? 0 : VM_MAYWRITE | VM_WRITE);
        shmd->vm_next_share = shmd->vm_prev_share = NULL;
        shmd->vm_inode = NULL;
        shmd->vm_offset = 0;
        shmd->vm_ops = &shm_vm_ops;

        shp->shm_nattch++;      /* prevent destruction */
        if ((err = shm_map (shmd))) {
                if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
                        killseg(id);
                kfree(shmd);
                return err;
        }

        insert_attach(shp, shmd);  /* insert shmd into shp->attaches */

        shp->shm_lpid = current->pid;
        shp->shm_atime = CURRENT_TIME;

        *raddr = addr;
        return 0;
}
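/*
 * Ordering in sys_shmat() matters: shm_nattch is raised before
 * shm_map() can sleep, so a concurrent IPC_RMID only marks the segment
 * SHM_DEST instead of freeing it under us; if the mapping fails, the
 * count is dropped again and a segment already marked SHM_DEST is
 * destroyed right here.
 */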

/* This is called by fork, once for every shm attach. */
static void shm_open (struct vm_area_struct *shmd)
{
        unsigned int id;
        struct shmid_ds *shp;

        id = SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK;
        shp = shm_segs[id];
        if (shp == IPC_UNUSED) {
                printk("shm_open: unused id=%d PANIC\n", id);
                return;
        }
        insert_attach(shp, shmd);
        shp->shm_nattch++;
        shp->shm_atime = CURRENT_TIME;
        shp->shm_lpid = current->pid;
}

/*
 * remove the attach descriptor shmd.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close (struct vm_area_struct *shmd)
{
        struct shmid_ds *shp;
        int id;

        /* remove from the list of attaches of the shm segment */
        id = SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK;
        shp = shm_segs[id];
        remove_attach(shp, shmd);
        shp->shm_lpid = current->pid;
        shp->shm_dtime = CURRENT_TIME;
        if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
                killseg (id);
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage int sys_shmdt (char *shmaddr)
{
        struct vm_area_struct *shmd, *shmdnext;

        for (shmd = current->mm->mmap; shmd; shmd = shmdnext) {
                shmdnext = shmd->vm_next;
                if (shmd->vm_ops == &shm_vm_ops
                    && shmd->vm_start - shmd->vm_offset == (ulong) shmaddr)
                        do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);
        }
        return 0;
}

/*
 * page not present ... go through shm_pages
 */
static pte_t shm_swap_in(struct vm_area_struct * shmd, unsigned long offset, unsigned long code)
{
        pte_t pte;
        struct shmid_ds *shp;
        unsigned int id, idx;

        id = SWP_OFFSET(code) & SHM_ID_MASK;
        if (id != (SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK)) {
                printk ("shm_swap_in: code id = %d and shmd id = %ld differ\n",
                        id, SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK);
                return BAD_PAGE;
        }
        if (id > max_shmid) {
                printk ("shm_swap_in: id=%d too big. proc mem corrupted\n", id);
                return BAD_PAGE;
        }
        shp = shm_segs[id];
        if (shp == IPC_UNUSED || shp == IPC_NOID) {
                printk ("shm_swap_in: id=%d invalid. Race.\n", id);
                return BAD_PAGE;
        }
        idx = (SWP_OFFSET(code) >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
        if (idx != (offset >> PAGE_SHIFT)) {
                printk ("shm_swap_in: code idx = %u and shmd idx = %lu differ\n",
                        idx, offset >> PAGE_SHIFT);
                return BAD_PAGE;
        }
        if (idx >= shp->shm_npages) {
                printk ("shm_swap_in : too large page index. id=%d\n", id);
                return BAD_PAGE;
        }

        pte_val(pte) = shp->shm_pages[idx];
        if (!pte_present(pte)) {
                unsigned long page = get_free_page(GFP_KERNEL);
                if (!page) {
                        oom(current);
                        return BAD_PAGE;
                }
                pte_val(pte) = shp->shm_pages[idx];
                if (pte_present(pte)) {
                        free_page (page); /* doesn't sleep */
                        goto done;
                }
                if (!pte_none(pte)) {
                        read_swap_page(pte_val(pte), (char *) page);
                        pte_val(pte) = shp->shm_pages[idx];
                        if (pte_present(pte)) {
                                free_page (page); /* doesn't sleep */
                                goto done;
                        }
                        swap_free(pte_val(pte));
                        shm_swp--;
                }
                shm_rss++;
                pte = pte_mkdirty(mk_pte(page, PAGE_SHARED));
                shp->shm_pages[idx] = pte_val(pte);
        } else
                --current->maj_flt;  /* was incremented in do_no_page */

done:   /* pte_val(pte) == shp->shm_pages[idx] */
        current->min_flt++;
        mem_map[MAP_NR(pte_page(pte))].count++;
        return pte_modify(pte, shmd->vm_page_prot);
}
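/*
 * Both get_free_page() and read_swap_page() above may sleep, so
 * shp->shm_pages[idx] is re-read after each call: another process
 * faulting on the same shm page may have installed it meanwhile, in
 * which case the freshly allocated page is released and the existing
 * entry is used.  The final reference count bump covers the new user.
 */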

/*
 * Goes through counter = (shm_rss >> prio) present shm pages.
 */
static unsigned long swap_id = 0; /* currently being swapped */
static unsigned long swap_idx = 0; /* next to swap */

int shm_swap (int prio, int dma)
{
        pte_t page;
        struct shmid_ds *shp;
        struct vm_area_struct *shmd;
        unsigned long swap_nr;
        unsigned long id, idx;
        int loop = 0;
        int counter;

        counter = shm_rss >> prio;
        if (!counter || !(swap_nr = get_swap_page()))
                return 0;

check_id:
        shp = shm_segs[swap_id];
        if (shp == IPC_UNUSED || shp == IPC_NOID || shp->shm_perm.mode & SHM_LOCKED) {
next_id:
                swap_idx = 0;
                if (++swap_id > max_shmid) {
                        if (loop)
                                goto failed;
                        loop = 1;
                        swap_id = 0;
                }
                goto check_id;
        }
        id = swap_id;

check_table:
        idx = swap_idx++;
        if (idx >= shp->shm_npages)
                goto next_id;

        pte_val(page) = shp->shm_pages[idx];
        if (!pte_present(page))
                goto check_table;
        if (dma && !PageDMA(&mem_map[MAP_NR(pte_page(page))]))
                goto check_table;
        swap_attempts++;

        if (--counter < 0) { /* failed */
failed:
                swap_free (swap_nr);
                return 0;
        }
        if (shp->attaches)
          for (shmd = shp->attaches; ; ) {
            do {
                pgd_t *page_dir;
                pmd_t *page_middle;
                pte_t *page_table, pte;
                unsigned long tmp;

                if ((SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK) != id) {
                        printk ("shm_swap: id=%ld does not match shmd->vm_pte.id=%ld\n",
                                id, SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK);
                        continue;
                }
                tmp = shmd->vm_start + (idx << PAGE_SHIFT) - shmd->vm_offset;
                if (!(tmp >= shmd->vm_start && tmp < shmd->vm_end))
                        continue;
                page_dir = pgd_offset(shmd->vm_mm, tmp);
                if (pgd_none(*page_dir) || pgd_bad(*page_dir)) {
                        printk("shm_swap: bad pgtbl! id=%ld start=%lx idx=%ld\n",
                               id, shmd->vm_start, idx);
                        pgd_clear(page_dir);
                        continue;
                }
                page_middle = pmd_offset(page_dir, tmp);
                if (pmd_none(*page_middle) || pmd_bad(*page_middle)) {
                        printk("shm_swap: bad pgmid! id=%ld start=%lx idx=%ld\n",
                               id, shmd->vm_start, idx);
                        pmd_clear(page_middle);
                        continue;
                }
                page_table = pte_offset(page_middle, tmp);
                pte = *page_table;
                if (!pte_present(pte))
                        continue;
                if (pte_young(pte)) {
                        set_pte(page_table, pte_mkold(pte));
                        continue;
                }
                if (pte_page(pte) != pte_page(page))
                        printk("shm_swap_out: page and pte mismatch\n");
                flush_cache_page(shmd, tmp);
                set_pte(page_table,
                  __pte(shmd->vm_pte + SWP_ENTRY(0, idx << SHM_IDX_SHIFT)));
                mem_map[MAP_NR(pte_page(pte))].count--;
                if (shmd->vm_mm->rss > 0)
                        shmd->vm_mm->rss--;
                flush_tlb_page(shmd, tmp);
                /* continue looping through circular list */
            } while (0);
            if ((shmd = shmd->vm_next_share) == shp->attaches)
                break;
        }

        if (mem_map[MAP_NR(pte_page(page))].count != 1)
                goto check_table;
        shp->shm_pages[idx] = swap_nr;
        write_swap_page (swap_nr, (char *) pte_page(page));
        free_page(pte_page(page));
        swap_successes++;
        shm_swp++;
        shm_rss--;
        return 1;
}
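/*
 * shm_swap() is the shared-memory half of the kernel swap-out path;
 * each successful call evicts exactly one resident shm page.  swap_id
 * and swap_idx form a clock-style cursor over segments and pages:
 * locked segments are skipped, recently referenced ptes are only aged
 * with pte_mkold(), every attaching pte is replaced by the segment
 * signature, and the page is written to swap only once its use count
 * has dropped to the single shm_pages[] reference.
 */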