This source file includes the following definitions:
- shm_init
- findkey
- newseg
- sys_shmget
- killseg
- sys_shmctl
- insert_attach
- remove_attach
- shm_map
- sys_shmat
- shm_open
- shm_close
- sys_shmdt
- shm_swap_in
- shm_swap
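
For orientation, here is a minimal user-space sketch (not part of this file) of how the standard libc wrappers reach the entry points listed above: shmget() enters sys_shmget, shmat() enters sys_shmat, shmdt() enters sys_shmdt and shmctl() enters sys_shmctl. The headers, the 4096-byte size and the error handling are illustrative choices only.

    /*
     * Illustrative user-space counterpart (not part of this file).  The
     * libc wrappers below enter the kernel through sys_shmget(),
     * sys_shmat(), sys_shmdt() and sys_shmctl() defined further down.
     */
    #include <stdio.h>
    #include <string.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
    	char *p;
    	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);

    	if (id < 0) {
    		perror("shmget");
    		return 1;
    	}
    	p = (char *) shmat(id, NULL, 0);	/* let the kernel pick the address */
    	if (p == (char *) -1) {
    		perror("shmat");
    		return 1;
    	}
    	strcpy(p, "hello");			/* pages are faulted in and recorded in shm_pages[] */
    	printf("segment %d says: %s\n", id, p);
    	shmdt(p);				/* detach; shm_close() drops shm_nattch */
    	shmctl(id, IPC_RMID, NULL);		/* mark SHM_DEST; killseg() frees the segment */
    	return 0;
    }
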
1
2
3
4
5
6
7
8 #include <linux/errno.h>
9 #include <linux/sched.h>
10 #include <linux/mm.h>
11 #include <linux/ipc.h>
12 #include <linux/shm.h>
13 #include <linux/stat.h>
14 #include <linux/malloc.h>
15 #include <linux/swap.h>
16
17 #include <asm/segment.h>
18 #include <asm/pgtable.h>
19
20 extern int ipcperms (struct ipc_perm *ipcp, short shmflg);
21 extern unsigned long get_swap_page (void);
22 static int findkey (key_t key);
23 static int newseg (key_t key, int shmflg, int size);
24 static int shm_map (struct vm_area_struct *shmd);
25 static void killseg (int id);
26 static void shm_open (struct vm_area_struct *shmd);
27 static void shm_close (struct vm_area_struct *shmd);
28 static pte_t shm_swap_in(struct vm_area_struct *, unsigned long, unsigned long);
29
30 static int shm_tot = 0;
31 static int shm_rss = 0;
32 static int shm_swp = 0;
33 static int max_shmid = 0;
34 static struct wait_queue *shm_lock = NULL;
35 static struct shmid_ds *shm_segs[SHMMNI];
36
37 static unsigned short shm_seq = 0;
38
39
40 static ulong swap_attempts = 0;
41 static ulong swap_successes = 0;
42 static ulong used_segs = 0;
43
44 void shm_init (void)
45 {
46 int id;
47
48 for (id = 0; id < SHMMNI; id++)
49 shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
50 shm_tot = shm_rss = shm_seq = max_shmid = used_segs = 0;
51 shm_lock = NULL;
52 return;
53 }
54
55 static int findkey (key_t key)
56 {
57 int id;
58 struct shmid_ds *shp;
59
60 for (id = 0; id <= max_shmid; id++) {
61 while ((shp = shm_segs[id]) == IPC_NOID)
62 sleep_on (&shm_lock);
63 if (shp == IPC_UNUSED)
64 continue;
65 if (key == shp->shm_perm.key)
66 return id;
67 }
68 return -1;
69 }
70
71
72
73
74 static int newseg (key_t key, int shmflg, int size)
75 {
76 struct shmid_ds *shp;
77 int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
78 int id, i;
79
80 if (size < SHMMIN)
81 return -EINVAL;
82 if (shm_tot + numpages >= SHMALL)
83 return -ENOSPC;
84 for (id = 0; id < SHMMNI; id++)
85 if (shm_segs[id] == IPC_UNUSED) {
86 shm_segs[id] = (struct shmid_ds *) IPC_NOID;
87 goto found;
88 }
89 return -ENOSPC;
90
91 found:
92 shp = (struct shmid_ds *) kmalloc (sizeof (*shp), GFP_KERNEL);
93 if (!shp) {
94 shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
95 if (shm_lock)
96 wake_up (&shm_lock);
97 return -ENOMEM;
98 }
99
100 shp->shm_pages = (ulong *) kmalloc (numpages*sizeof(ulong),GFP_KERNEL);
101 if (!shp->shm_pages) {
102 shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
103 if (shm_lock)
104 wake_up (&shm_lock);
105 kfree(shp);
106 return -ENOMEM;
107 }
108
109 for (i = 0; i < numpages; shp->shm_pages[i++] = 0);
110 shm_tot += numpages;
111 shp->shm_perm.key = key;
112 shp->shm_perm.mode = (shmflg & S_IRWXUGO);
113 shp->shm_perm.cuid = shp->shm_perm.uid = current->euid;
114 shp->shm_perm.cgid = shp->shm_perm.gid = current->egid;
115 shp->shm_perm.seq = shm_seq;
116 shp->shm_segsz = size;
117 shp->shm_cpid = current->pid;
118 shp->attaches = NULL;
119 shp->shm_lpid = shp->shm_nattch = 0;
120 shp->shm_atime = shp->shm_dtime = 0;
121 shp->shm_ctime = CURRENT_TIME;
122 shp->shm_npages = numpages;
123
124 if (id > max_shmid)
125 max_shmid = id;
126 shm_segs[id] = shp;
127 used_segs++;
128 if (shm_lock)
129 wake_up (&shm_lock);
130 return (unsigned int) shp->shm_perm.seq * SHMMNI + id;
131 }
132
133 asmlinkage int sys_shmget (key_t key, int size, int shmflg)
134 {
135 struct shmid_ds *shp;
136 int id = 0;
137
138 if (size < 0 || size > SHMMAX)
139 return -EINVAL;
140 if (key == IPC_PRIVATE)
141 return newseg(key, shmflg, size);
142 if ((id = findkey (key)) == -1) {
143 if (!(shmflg & IPC_CREAT))
144 return -ENOENT;
145 return newseg(key, shmflg, size);
146 }
147 if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL))
148 return -EEXIST;
149 shp = shm_segs[id];
150 if (shp->shm_perm.mode & SHM_DEST)
151 return -EIDRM;
152 if (size > shp->shm_segsz)
153 return -EINVAL;
154 if (ipcperms (&shp->shm_perm, shmflg))
155 return -EACCES;
156 return (unsigned int) shp->shm_perm.seq * SHMMNI + id;
157 }
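
The identifier computed on the last line above packs both the table slot and the per-slot sequence number; the other entry points in this file undo that packing before touching shm_segs[]. A restatement of the arithmetic already used in this file:

    /*
     * shmid layout used throughout this file:
     *
     *	shmid = shm_perm.seq * SHMMNI + slot
     *
     * so the consumers recover
     *
     *	slot = (unsigned int) shmid % SHMMNI;	(index into shm_segs[])
     *	seq  = (unsigned int) shmid / SHMMNI;	(checked against shm_perm.seq)
     *
     * killseg() bumps shm_perm.seq, so a stale id whose slot has been
     * reused fails the seq comparison and the caller gets -EIDRM.
     */
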
158
159
160
161
162
163 static void killseg (int id)
164 {
165 struct shmid_ds *shp;
166 int i, numpages;
167
168 shp = shm_segs[id];
169 if (shp == IPC_NOID || shp == IPC_UNUSED) {
170 printk ("shm nono: killseg called on unused seg id=%d\n", id);
171 return;
172 }
173 shp->shm_perm.seq++;
174 shm_seq = (shm_seq+1) % ((unsigned)(1<<31)/SHMMNI);
175 shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
176 used_segs--;
177 if (id == max_shmid)
178 while (max_shmid && (shm_segs[--max_shmid] == IPC_UNUSED));
179 if (!shp->shm_pages) {
180 printk ("shm nono: killseg shp->pages=NULL. id=%d\n", id);
181 return;
182 }
183 numpages = shp->shm_npages;
184 for (i = 0; i < numpages ; i++) {
185 pte_t pte;
186 pte_val(pte) = shp->shm_pages[i];
187 if (pte_none(pte))
188 continue;
189 if (pte_present(pte)) {
190 free_page (pte_page(pte));
191 shm_rss--;
192 } else {
193 swap_free(pte_val(pte));
194 shm_swp--;
195 }
196 }
197 kfree(shp->shm_pages);
198 shm_tot -= numpages;
199 kfree(shp);
200 return;
201 }
202
203 asmlinkage int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
204 {
205 struct shmid_ds tbuf;
206 struct shmid_ds *shp;
207 struct ipc_perm *ipcp;
208 int id, err;
209
210 if (cmd < 0 || shmid < 0)
211 return -EINVAL;
212 if (cmd == IPC_SET) {
213 if (!buf)
214 return -EFAULT;
215 err = verify_area (VERIFY_READ, buf, sizeof (*buf));
216 if (err)
217 return err;
218 memcpy_fromfs (&tbuf, buf, sizeof (*buf));
219 }
220
221 switch (cmd) {
222 case IPC_INFO:
223 {
224 struct shminfo shminfo;
225 if (!buf)
226 return -EFAULT;
227 shminfo.shmmni = SHMMNI;
228 shminfo.shmmax = SHMMAX;
229 shminfo.shmmin = SHMMIN;
230 shminfo.shmall = SHMALL;
231 shminfo.shmseg = SHMSEG;
232 err = verify_area (VERIFY_WRITE, buf, sizeof (struct shminfo));
233 if (err)
234 return err;
235 memcpy_tofs (buf, &shminfo, sizeof(struct shminfo));
236 return max_shmid;
237 }
238 case SHM_INFO:
239 {
240 struct shm_info shm_info;
241 if (!buf)
242 return -EFAULT;
243 err = verify_area (VERIFY_WRITE, buf, sizeof (shm_info));
244 if (err)
245 return err;
246 shm_info.used_ids = used_segs;
247 shm_info.shm_rss = shm_rss;
248 shm_info.shm_tot = shm_tot;
249 shm_info.shm_swp = shm_swp;
250 shm_info.swap_attempts = swap_attempts;
251 shm_info.swap_successes = swap_successes;
252 memcpy_tofs (buf, &shm_info, sizeof(shm_info));
253 return max_shmid;
254 }
255 case SHM_STAT:
256 if (!buf)
257 return -EFAULT;
258 err = verify_area (VERIFY_WRITE, buf, sizeof (*buf));
259 if (err)
260 return err;
261 if (shmid > max_shmid)
262 return -EINVAL;
263 shp = shm_segs[shmid];
264 if (shp == IPC_UNUSED || shp == IPC_NOID)
265 return -EINVAL;
266 if (ipcperms (&shp->shm_perm, S_IRUGO))
267 return -EACCES;
268 id = (unsigned int) shp->shm_perm.seq * SHMMNI + shmid;
269 tbuf.shm_perm = shp->shm_perm;
270 tbuf.shm_segsz = shp->shm_segsz;
271 tbuf.shm_atime = shp->shm_atime;
272 tbuf.shm_dtime = shp->shm_dtime;
273 tbuf.shm_ctime = shp->shm_ctime;
274 tbuf.shm_cpid = shp->shm_cpid;
275 tbuf.shm_lpid = shp->shm_lpid;
276 tbuf.shm_nattch = shp->shm_nattch;
277 memcpy_tofs (buf, &tbuf, sizeof(*buf));
278 return id;
279 }
280
281 shp = shm_segs[id = (unsigned int) shmid % SHMMNI];
282 if (shp == IPC_UNUSED || shp == IPC_NOID)
283 return -EINVAL;
284 if (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)
285 return -EIDRM;
286 ipcp = &shp->shm_perm;
287
288 switch (cmd) {
289 case SHM_UNLOCK:
290 if (!suser())
291 return -EPERM;
292 if (!(ipcp->mode & SHM_LOCKED))
293 return -EINVAL;
294 ipcp->mode &= ~SHM_LOCKED;
295 break;
296 case SHM_LOCK:
297
298
299
300 if (!suser())
301 return -EPERM;
302 if (ipcp->mode & SHM_LOCKED)
303 return -EINVAL;
304 ipcp->mode |= SHM_LOCKED;
305 break;
306 case IPC_STAT:
307 if (ipcperms (ipcp, S_IRUGO))
308 return -EACCES;
309 if (!buf)
310 return -EFAULT;
311 err = verify_area (VERIFY_WRITE, buf, sizeof (*buf));
312 if (err)
313 return err;
314 tbuf.shm_perm = shp->shm_perm;
315 tbuf.shm_segsz = shp->shm_segsz;
316 tbuf.shm_atime = shp->shm_atime;
317 tbuf.shm_dtime = shp->shm_dtime;
318 tbuf.shm_ctime = shp->shm_ctime;
319 tbuf.shm_cpid = shp->shm_cpid;
320 tbuf.shm_lpid = shp->shm_lpid;
321 tbuf.shm_nattch = shp->shm_nattch;
322 memcpy_tofs (buf, &tbuf, sizeof(*buf));
323 break;
324 case IPC_SET:
325 if (suser() || current->euid == shp->shm_perm.uid ||
326 current->euid == shp->shm_perm.cuid) {
327 ipcp->uid = tbuf.shm_perm.uid;
328 ipcp->gid = tbuf.shm_perm.gid;
329 ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
330 | (tbuf.shm_perm.mode & S_IRWXUGO);
331 shp->shm_ctime = CURRENT_TIME;
332 break;
333 }
334 return -EPERM;
335 case IPC_RMID:
336 if (suser() || current->euid == shp->shm_perm.uid ||
337 current->euid == shp->shm_perm.cuid) {
338 shp->shm_perm.mode |= SHM_DEST;
339 if (shp->shm_nattch <= 0)
340 killseg (id);
341 break;
342 }
343 return -EPERM;
344 default:
345 return -EINVAL;
346 }
347 return 0;
348 }
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363 static struct vm_operations_struct shm_vm_ops = {
364 shm_open,
365 shm_close,
366 NULL,
367 NULL,
368 NULL,
369 NULL,
370 NULL,
371 NULL,
372 NULL,
373 shm_swap_in
374 };
375
376
377 static inline void insert_attach (struct shmid_ds * shp, struct vm_area_struct * shmd)
378 {
379 struct vm_area_struct * attaches;
380
381 if ((attaches = shp->attaches)) {
382 shmd->vm_next_share = attaches;
383 shmd->vm_prev_share = attaches->vm_prev_share;
384 shmd->vm_prev_share->vm_next_share = shmd;
385 attaches->vm_prev_share = shmd;
386 } else
387 shp->attaches = shmd->vm_next_share = shmd->vm_prev_share = shmd;
388 }
389
390
391 static inline void remove_attach (struct shmid_ds * shp, struct vm_area_struct * shmd)
392 {
393 if (shmd->vm_next_share == shmd) {
394 if (shp->attaches != shmd) {
395 printk("shm_close: shm segment (id=%ld) attach list inconsistent\n",
396 SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK);
397 printk("shm_close: %08lx-%08lx %c%c%c%c %08lx %08lx\n",
398 shmd->vm_start, shmd->vm_end,
399 shmd->vm_flags & VM_READ ? 'r' : '-',
400 shmd->vm_flags & VM_WRITE ? 'w' : '-',
401 shmd->vm_flags & VM_EXEC ? 'x' : '-',
402 shmd->vm_flags & VM_MAYSHARE ? 's' : 'p',
403 shmd->vm_offset, shmd->vm_pte);
404 }
405 shp->attaches = NULL;
406 } else {
407 if (shp->attaches == shmd)
408 shp->attaches = shmd->vm_next_share;
409 shmd->vm_prev_share->vm_next_share = shmd->vm_next_share;
410 shmd->vm_next_share->vm_prev_share = shmd->vm_prev_share;
411 }
412 }
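
insert_attach() and remove_attach() maintain the per-segment list of mappings; a summary of the invariant, assuming nothing beyond what the two helpers above show:

    /*
     * shp->attaches points at one element of a circular, doubly linked
     * ring built through vm_next_share/vm_prev_share.  A segment with a
     * single attach therefore has a descriptor pointing at itself in both
     * directions, which is exactly the case remove_attach() tests for
     * before clearing shp->attaches.
     */
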
413
414
415
416
417
418 static int shm_map (struct vm_area_struct *shmd)
419 {
420 pgd_t *page_dir;
421 pmd_t *page_middle;
422 pte_t *page_table;
423 unsigned long tmp, shm_sgn;
424
425
426 do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);
427
428
429 current->mm->total_vm += (shmd->vm_end - shmd->vm_start) >> PAGE_SHIFT;
430 insert_vm_struct(current, shmd);
431 merge_segments(current, shmd->vm_start, shmd->vm_end);
432
433
434 shm_sgn = shmd->vm_pte +
435 SWP_ENTRY(0, (shmd->vm_offset >> PAGE_SHIFT) << SHM_IDX_SHIFT);
436 flush_cache_range(shmd->vm_mm, shmd->vm_start, shmd->vm_end);
437 for (tmp = shmd->vm_start;
438 tmp < shmd->vm_end;
439 tmp += PAGE_SIZE, shm_sgn += SWP_ENTRY(0, 1 << SHM_IDX_SHIFT))
440 {
441 page_dir = pgd_offset(shmd->vm_mm,tmp);
442 page_middle = pmd_alloc(page_dir,tmp);
443 if (!page_middle)
444 return -ENOMEM;
445 page_table = pte_alloc(page_middle,tmp);
446 if (!page_table)
447 return -ENOMEM;
448 set_pte(page_table, __pte(shm_sgn));
449 }
450 flush_tlb_range(shmd->vm_mm, shmd->vm_start, shmd->vm_end);
451 return 0;
452 }
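
Note that shm_map() installs no real pages: every PTE in the range is written as a not-present SHM_SWP_TYPE swap entry derived from shmd->vm_pte, so the first access to each page faults into shm_swap_in() below. The encoding, restated from the loop above and the decoding in shm_swap_in():

    /*
     * Not-present shm PTE written by shm_map():
     *
     *	entry = SWP_ENTRY(SHM_SWP_TYPE, (idx << SHM_IDX_SHIFT) | id)
     *
     * decoded on the fault path as
     *
     *	id  = SWP_OFFSET(entry) & SHM_ID_MASK;
     *	idx = (SWP_OFFSET(entry) >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
     *
     * shm_swap_in() verifies both fields against the faulting vma before
     * mapping the page held in shp->shm_pages[idx].
     */
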
453
454
455
456
457 asmlinkage int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
458 {
459 struct shmid_ds *shp;
460 struct vm_area_struct *shmd;
461 int err;
462 unsigned int id;
463 unsigned long addr;
464
465 if (shmid < 0) {
466
467 return -EINVAL;
468 }
469
470 shp = shm_segs[id = (unsigned int) shmid % SHMMNI];
471 if (shp == IPC_UNUSED || shp == IPC_NOID) {
472
473 return -EINVAL;
474 }
475
476 if (!(addr = (ulong) shmaddr)) {
477 if (shmflg & SHM_REMAP)
478 return -EINVAL;
479 if (!(addr = get_unmapped_area(0, shp->shm_segsz)))
480 return -ENOMEM;
481 } else if (addr & (SHMLBA-1)) {
482 if (shmflg & SHM_RND)
483 addr &= ~(SHMLBA-1);
484 else
485 return -EINVAL;
486 }
487
488
489
490
491 if (addr < current->mm->start_stack &&
492 addr > current->mm->start_stack - PAGE_SIZE*(shp->shm_npages + 4))
493 {
494
495 return -EINVAL;
496 }
497 if (!(shmflg & SHM_REMAP))
498 if ((shmd = find_vma_intersection(current, addr, addr + shp->shm_segsz))) {
499
500
501 return -EINVAL;
502 }
503
504 if (ipcperms(&shp->shm_perm, shmflg & SHM_RDONLY ? S_IRUGO : S_IRUGO|S_IWUGO))
505 return -EACCES;
506 if (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)
507 return -EIDRM;
508
509 shmd = (struct vm_area_struct *) kmalloc (sizeof(*shmd), GFP_KERNEL);
510 if (!shmd)
511 return -ENOMEM;
512 if ((shp != shm_segs[id]) || (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)) {
513 kfree(shmd);
514 return -EIDRM;
515 }
516
517 shmd->vm_pte = SWP_ENTRY(SHM_SWP_TYPE, id);
518 shmd->vm_start = addr;
519 shmd->vm_end = addr + shp->shm_npages * PAGE_SIZE;
520 shmd->vm_mm = current->mm;
521 shmd->vm_page_prot = (shmflg & SHM_RDONLY) ? PAGE_READONLY : PAGE_SHARED;
522 shmd->vm_flags = VM_SHM | VM_MAYSHARE | VM_SHARED
523 | VM_MAYREAD | VM_MAYEXEC | VM_READ | VM_EXEC
524 | ((shmflg & SHM_RDONLY) ? 0 : VM_MAYWRITE | VM_WRITE);
525 shmd->vm_next_share = shmd->vm_prev_share = NULL;
526 shmd->vm_inode = NULL;
527 shmd->vm_offset = 0;
528 shmd->vm_ops = &shm_vm_ops;
529
530 shp->shm_nattch++;
531 if ((err = shm_map (shmd))) {
532 if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
533 killseg(id);
534 kfree(shmd);
535 return err;
536 }
537
538 insert_attach(shp,shmd);
539
540 shp->shm_lpid = current->pid;
541 shp->shm_atime = CURRENT_TIME;
542
543 *raddr = addr;
544 return 0;
545 }
546
547
548 static void shm_open (struct vm_area_struct *shmd)
549 {
550 unsigned int id;
551 struct shmid_ds *shp;
552
553 id = SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK;
554 shp = shm_segs[id];
555 if (shp == IPC_UNUSED) {
556 printk("shm_open: unused id=%d PANIC\n", id);
557 return;
558 }
559 insert_attach(shp,shmd);
560 shp->shm_nattch++;
561 shp->shm_atime = CURRENT_TIME;
562 shp->shm_lpid = current->pid;
563 }
564
565
566
567
568
569
570
571 static void shm_close (struct vm_area_struct *shmd)
572 {
573 struct shmid_ds *shp;
574 int id;
575
576
577 id = SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK;
578 shp = shm_segs[id];
579 remove_attach(shp,shmd);
580 shp->shm_lpid = current->pid;
581 shp->shm_dtime = CURRENT_TIME;
582 if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
583 killseg (id);
584 }
585
586
587
588
589
590 asmlinkage int sys_shmdt (char *shmaddr)
591 {
592 struct vm_area_struct *shmd, *shmdnext;
593
594 for (shmd = current->mm->mmap; shmd; shmd = shmdnext) {
595 shmdnext = shmd->vm_next;
596 if (shmd->vm_ops == &shm_vm_ops
597 && shmd->vm_start - shmd->vm_offset == (ulong) shmaddr)
598 do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);
599 }
600 return 0;
601 }
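
Note that sys_shmdt() itself only locates the mapping and calls do_munmap(); decrementing shm_nattch and, when the segment is marked SHM_DEST, the call to killseg() happen in shm_close(), which the unmap path is expected to reach through shm_vm_ops (an assumption about code outside this file).
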
602
603
604
605
606 static pte_t shm_swap_in(struct vm_area_struct * shmd, unsigned long offset, unsigned long code)
607 {
608 pte_t pte;
609 struct shmid_ds *shp;
610 unsigned int id, idx;
611
612 id = SWP_OFFSET(code) & SHM_ID_MASK;
613 if (id != (SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK)) {
614 printk ("shm_swap_in: code id = %d and shmd id = %ld differ\n",
615 id, SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK);
616 return BAD_PAGE;
617 }
618 if (id > max_shmid) {
619 printk ("shm_swap_in: id=%d too big. proc mem corrupted\n", id);
620 return BAD_PAGE;
621 }
622 shp = shm_segs[id];
623 if (shp == IPC_UNUSED || shp == IPC_NOID) {
624 printk ("shm_swap_in: id=%d invalid. Race.\n", id);
625 return BAD_PAGE;
626 }
627 idx = (SWP_OFFSET(code) >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
628 if (idx != (offset >> PAGE_SHIFT)) {
629 printk ("shm_swap_in: code idx = %u and shmd idx = %lu differ\n",
630 idx, offset >> PAGE_SHIFT);
631 return BAD_PAGE;
632 }
633 if (idx >= shp->shm_npages) {
634 printk ("shm_swap_in : too large page index. id=%d\n", id);
635 return BAD_PAGE;
636 }
637
638 pte_val(pte) = shp->shm_pages[idx];
639 if (!pte_present(pte)) {
640 unsigned long page = get_free_page(GFP_KERNEL);
641 if (!page) {
642 oom(current);
643 return BAD_PAGE;
644 }
645 pte_val(pte) = shp->shm_pages[idx];
646 if (pte_present(pte)) {
647 free_page (page);
648 goto done;
649 }
650 if (!pte_none(pte)) {
651 read_swap_page(pte_val(pte), (char *) page);
652 pte_val(pte) = shp->shm_pages[idx];
653 if (pte_present(pte)) {
654 free_page (page);
655 goto done;
656 }
657 swap_free(pte_val(pte));
658 shm_swp--;
659 }
660 shm_rss++;
661 pte = pte_mkdirty(mk_pte(page, PAGE_SHARED));
662 shp->shm_pages[idx] = pte_val(pte);
663 } else
664 --current->maj_flt;
665
666 done:
667 current->min_flt++;
668 mem_map[MAP_NR(pte_page(pte))].count++;
669 return pte_modify(pte, shmd->vm_page_prot);
670 }
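
The repeated re-reads of shp->shm_pages[idx] above are deliberate: get_free_page() and read_swap_page() may block, so another process faulting the same page can install it in the meantime; in that case the page allocated here is freed again and the existing entry is used via the `done' path.
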
671
672
673
674
675 static unsigned long swap_id = 0;
676 static unsigned long swap_idx = 0;
677
678 int shm_swap (int prio, int dma)
679 {
680 pte_t page;
681 struct shmid_ds *shp;
682 struct vm_area_struct *shmd;
683 unsigned long swap_nr;
684 unsigned long id, idx;
685 int loop = 0;
686 int counter;
687
688 counter = shm_rss >> prio;
689 if (!counter || !(swap_nr = get_swap_page()))
690 return 0;
691
692 check_id:
693 shp = shm_segs[swap_id];
694 if (shp == IPC_UNUSED || shp == IPC_NOID || shp->shm_perm.mode & SHM_LOCKED ) {
695 next_id:
696 swap_idx = 0;
697 if (++swap_id > max_shmid) {
698 if (loop)
699 goto failed;
700 loop = 1;
701 swap_id = 0;
702 }
703 goto check_id;
704 }
705 id = swap_id;
706
707 check_table:
708 idx = swap_idx++;
709 if (idx >= shp->shm_npages)
710 goto next_id;
711
712 pte_val(page) = shp->shm_pages[idx];
713 if (!pte_present(page))
714 goto check_table;
715 if (dma && !PageDMA(MAP_NR(pte_page(page)) + mem_map))
716 goto check_table;
717 swap_attempts++;
718
719 if (--counter < 0) {
720 failed:
721 swap_free (swap_nr);
722 return 0;
723 }
724 if (shp->attaches)
725 for (shmd = shp->attaches; ; ) {
726 do {
727 pgd_t *page_dir;
728 pmd_t *page_middle;
729 pte_t *page_table, pte;
730 unsigned long tmp;
731
732 if ((SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK) != id) {
733 printk ("shm_swap: id=%ld does not match shmd->vm_pte.id=%ld\n",
734 id, SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK);
735 continue;
736 }
737 tmp = shmd->vm_start + (idx << PAGE_SHIFT) - shmd->vm_offset;
738 if (!(tmp >= shmd->vm_start && tmp < shmd->vm_end))
739 continue;
740 page_dir = pgd_offset(shmd->vm_mm,tmp);
741 if (pgd_none(*page_dir) || pgd_bad(*page_dir)) {
742 printk("shm_swap: bad pgtbl! id=%ld start=%lx idx=%ld\n",
743 id, shmd->vm_start, idx);
744 pgd_clear(page_dir);
745 continue;
746 }
747 page_middle = pmd_offset(page_dir,tmp);
748 if (pmd_none(*page_middle) || pmd_bad(*page_middle)) {
749 printk("shm_swap: bad pgmid! id=%ld start=%lx idx=%ld\n",
750 id, shmd->vm_start, idx);
751 pmd_clear(page_middle);
752 continue;
753 }
754 page_table = pte_offset(page_middle,tmp);
755 pte = *page_table;
756 if (!pte_present(pte))
757 continue;
758 if (pte_young(pte)) {
759 set_pte(page_table, pte_mkold(pte));
760 continue;
761 }
762 if (pte_page(pte) != pte_page(page))
763 printk("shm_swap_out: page and pte mismatch\n");
764 flush_cache_page(shmd, tmp);
765 set_pte(page_table,
766 __pte(shmd->vm_pte + SWP_ENTRY(0, idx << SHM_IDX_SHIFT)));
767 mem_map[MAP_NR(pte_page(pte))].count--;
768 if (shmd->vm_mm->rss > 0)
769 shmd->vm_mm->rss--;
770 flush_tlb_page(shmd, tmp);
771
772 } while (0);
773 if ((shmd = shmd->vm_next_share) == shp->attaches)
774 break;
775 }
776
777 if (mem_map[MAP_NR(pte_page(page))].count != 1)
778 goto check_table;
779 shp->shm_pages[idx] = swap_nr;
780 write_swap_page (swap_nr, (char *) pte_page(page));
781 free_page(pte_page(page));
782 swap_successes++;
783 shm_swp++;
784 shm_rss--;
785 return 1;
786 }