This source file includes the following definitions.
- shm_init
- findkey
- newseg
- sys_shmget
- killseg
- sys_shmctl
- shm_map
- sys_shmat
- shm_open
- shm_close
- sys_shmdt
- shm_swap_in
- shm_swap
1
2
3
4
5
6
7
8 #include <linux/errno.h>
9 #include <asm/segment.h>
10 #include <linux/sched.h>
11 #include <linux/mm.h>
12 #include <linux/ipc.h>
13 #include <linux/shm.h>
14 #include <linux/stat.h>
15 #include <linux/malloc.h>
16
/* helpers provided elsewhere in the ipc/mm code */
extern int ipcperms (struct ipc_perm *ipcp, short shmflg);
extern unsigned int get_swap_page (void);
static int findkey (key_t key);
static int newseg (key_t key, int shmflg, int size);
static int shm_map (struct vm_area_struct *shmd, int remap);
static void killseg (int id);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
static unsigned long shm_swap_in (struct vm_area_struct *, unsigned long, unsigned long);

static int shm_tot = 0;			/* total pages allocated to all segments (newseg/killseg) */
static int shm_rss = 0;			/* segment pages currently resident in memory */
static int shm_swp = 0;			/* segment pages currently out on swap */
static int max_shmid = 0;		/* highest slot index ever used; bounds scans */
static struct wait_queue *shm_lock = NULL;	/* findkey() sleeps here while a slot is IPC_NOID */
static struct shmid_ds *shm_segs[SHMMNI];	/* slot table: IPC_UNUSED, IPC_NOID, or a descriptor */

static unsigned short shm_seq = 0;	/* global sequence number stamped into new segments */


/* statistics reported via SHM_INFO */
static ulong swap_attempts = 0;
static ulong swap_successes = 0;
static ulong used_segs = 0;
40
41 void shm_init (void)
42 {
43 int id;
44
45 for (id = 0; id < SHMMNI; id++)
46 shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
47 shm_tot = shm_rss = shm_seq = max_shmid = used_segs = 0;
48 shm_lock = NULL;
49 return;
50 }
51
52 static int findkey (key_t key)
53 {
54 int id;
55 struct shmid_ds *shp;
56
57 for (id = 0; id <= max_shmid; id++) {
58 while ((shp = shm_segs[id]) == IPC_NOID)
59 sleep_on (&shm_lock);
60 if (shp == IPC_UNUSED)
61 continue;
62 if (key == shp->shm_perm.key)
63 return id;
64 }
65 return -1;
66 }
67
68
69
70
/*
 * Create a new shared memory segment of `size' bytes for `key'.
 * The chosen slot is first marked IPC_NOID so concurrent lookups in
 * findkey() sleep until initialization completes; every exit path
 * resolves the slot and wakes sleepers on shm_lock.
 * Returns the user-visible shmid (seq * SHMMNI + slot) or -errno.
 */
static int newseg (key_t key, int shmflg, int size)
{
	struct shmid_ds *shp;
	int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;	/* pages needed, rounded up */
	int id, i;

	if (size < SHMMIN)
		return -EINVAL;
	if (shm_tot + numpages >= SHMALL)	/* system-wide page budget */
		return -ENOSPC;
	/* claim the first free slot, marking it "being created" */
	for (id = 0; id < SHMMNI; id++)
		if (shm_segs[id] == IPC_UNUSED) {
			shm_segs[id] = (struct shmid_ds *) IPC_NOID;
			goto found;
		}
	return -ENOSPC;

found:
	shp = (struct shmid_ds *) kmalloc (sizeof (*shp), GFP_KERNEL);
	if (!shp) {
		/* roll the slot back and wake anyone sleeping in findkey() */
		shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
		if (shm_lock)
			wake_up (&shm_lock);
		return -ENOMEM;
	}

	/* one ulong per page: resident pte, swap entry, or 0 (no frame yet) */
	shp->shm_pages = (ulong *) kmalloc (numpages*sizeof(ulong),GFP_KERNEL);
	if (!shp->shm_pages) {
		shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
		if (shm_lock)
			wake_up (&shm_lock);
		kfree(shp);
		return -ENOMEM;
	}

	for (i = 0; i < numpages; shp->shm_pages[i++] = 0);
	shm_tot += numpages;
	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->shm_perm.cuid = shp->shm_perm.uid = current->euid;
	shp->shm_perm.cgid = shp->shm_perm.gid = current->egid;
	shp->shm_perm.seq = shm_seq;	/* stamps the id against slot reuse */
	shp->shm_segsz = size;
	shp->shm_cpid = current->pid;
	shp->attaches = NULL;
	shp->shm_lpid = shp->shm_nattch = 0;
	shp->shm_atime = shp->shm_dtime = 0;
	shp->shm_ctime = CURRENT_TIME;
	shp->shm_npages = numpages;

	if (id > max_shmid)
		max_shmid = id;
	shm_segs[id] = shp;	/* publish: slot now valid */
	used_segs++;
	if (shm_lock)
		wake_up (&shm_lock);
	return (unsigned int) shp->shm_perm.seq * SHMMNI + id;
}
129
130 int sys_shmget (key_t key, int size, int shmflg)
131 {
132 struct shmid_ds *shp;
133 int id = 0;
134
135 if (size < 0 || size > SHMMAX)
136 return -EINVAL;
137 if (key == IPC_PRIVATE)
138 return newseg(key, shmflg, size);
139 if ((id = findkey (key)) == -1) {
140 if (!(shmflg & IPC_CREAT))
141 return -ENOENT;
142 return newseg(key, shmflg, size);
143 }
144 if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL))
145 return -EEXIST;
146 shp = shm_segs[id];
147 if (shp->shm_perm.mode & SHM_DEST)
148 return -EIDRM;
149 if (size > shp->shm_segsz)
150 return -EINVAL;
151 if (ipcperms (&shp->shm_perm, shmflg))
152 return -EACCES;
153 return (unsigned int) shp->shm_perm.seq * SHMMNI + id;
154 }
155
156
157
158
159
160 static void killseg (int id)
161 {
162 struct shmid_ds *shp;
163 int i, numpages;
164 ulong page;
165
166 shp = shm_segs[id];
167 if (shp == IPC_NOID || shp == IPC_UNUSED) {
168 printk ("shm nono: killseg called on unused seg id=%d\n", id);
169 return;
170 }
171 shp->shm_perm.seq++;
172 shm_seq = (shm_seq+1) % ((unsigned)(1<<31)/SHMMNI);
173 shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
174 used_segs--;
175 if (id == max_shmid)
176 while (max_shmid && (shm_segs[--max_shmid] == IPC_UNUSED));
177 if (!shp->shm_pages) {
178 printk ("shm nono: killseg shp->pages=NULL. id=%d\n", id);
179 return;
180 }
181 numpages = shp->shm_npages;
182 for (i = 0; i < numpages ; i++) {
183 if (!(page = shp->shm_pages[i]))
184 continue;
185 if (page & PAGE_PRESENT) {
186 free_page (page & PAGE_MASK);
187 shm_rss--;
188 } else {
189 swap_free (page);
190 shm_swp--;
191 }
192 }
193 kfree(shp->shm_pages);
194 shm_tot -= numpages;
195 kfree(shp);
196 return;
197 }
198
/*
 * shmctl(2): segment control operations.
 * The info commands (IPC_INFO/SHM_INFO/SHM_STAT) interpret shmid as a
 * raw slot index; the remaining commands take a full shmid whose
 * sequence number is validated (-EIDRM when stale).
 */
int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
{
	struct shmid_ds tbuf;
	struct shmid_ds *shp;
	struct ipc_perm *ipcp;
	int id, err;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;
	if (cmd == IPC_SET) {
		/* copy the new settings in before locating the segment */
		if (!buf)
			return -EFAULT;
		err = verify_area (VERIFY_READ, buf, sizeof (*buf));
		if (err)
			return err;
		memcpy_fromfs (&tbuf, buf, sizeof (*buf));
	}

	switch (cmd) {
	case IPC_INFO:		/* report the compile-time limits */
	{
		struct shminfo shminfo;
		if (!buf)
			return -EFAULT;
		shminfo.shmmni = SHMMNI;
		shminfo.shmmax = SHMMAX;
		shminfo.shmmin = SHMMIN;
		shminfo.shmall = SHMALL;
		shminfo.shmseg = SHMSEG;
		err = verify_area (VERIFY_WRITE, buf, sizeof (struct shminfo));
		if (err)
			return err;
		memcpy_tofs (buf, &shminfo, sizeof(struct shminfo));
		return max_shmid;	/* highest slot index in use */
	}
	case SHM_INFO:		/* report current usage statistics */
	{
		struct shm_info shm_info;
		if (!buf)
			return -EFAULT;
		err = verify_area (VERIFY_WRITE, buf, sizeof (shm_info));
		if (err)
			return err;
		shm_info.used_ids = used_segs;
		shm_info.shm_rss = shm_rss;
		shm_info.shm_tot = shm_tot;
		shm_info.shm_swp = shm_swp;
		shm_info.swap_attempts = swap_attempts;
		shm_info.swap_successes = swap_successes;
		memcpy_tofs (buf, &shm_info, sizeof(shm_info));
		return max_shmid;
	}
	case SHM_STAT:		/* like IPC_STAT, but shmid is a slot index */
		if (!buf)
			return -EFAULT;
		err = verify_area (VERIFY_WRITE, buf, sizeof (*buf));
		if (err)
			return err;
		if (shmid > max_shmid)
			return -EINVAL;
		shp = shm_segs[shmid];
		if (shp == IPC_UNUSED || shp == IPC_NOID)
			return -EINVAL;
		if (ipcperms (&shp->shm_perm, S_IRUGO))
			return -EACCES;
		/* reconstruct the full id from the slot's sequence number */
		id = (unsigned int) shp->shm_perm.seq * SHMMNI + shmid;
		tbuf.shm_perm = shp->shm_perm;
		tbuf.shm_segsz = shp->shm_segsz;
		tbuf.shm_atime = shp->shm_atime;
		tbuf.shm_dtime = shp->shm_dtime;
		tbuf.shm_ctime = shp->shm_ctime;
		tbuf.shm_cpid = shp->shm_cpid;
		tbuf.shm_lpid = shp->shm_lpid;
		tbuf.shm_nattch = shp->shm_nattch;
		memcpy_tofs (buf, &tbuf, sizeof(*buf));
		return id;
	}

	/* remaining commands: decode the slot index and validate the sequence */
	shp = shm_segs[id = (unsigned int) shmid % SHMMNI];
	if (shp == IPC_UNUSED || shp == IPC_NOID)
		return -EINVAL;
	if (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)
		return -EIDRM;	/* segment was destroyed and the slot reused */
	ipcp = &shp->shm_perm;

	switch (cmd) {
	case SHM_UNLOCK:	/* allow the segment to be swapped again */
		if (!suser())
			return -EPERM;
		if (!(ipcp->mode & SHM_LOCKED))
			return -EINVAL;
		ipcp->mode &= ~SHM_LOCKED;
		break;
	case SHM_LOCK:
		/* superuser only: a SHM_LOCKED segment is skipped by shm_swap() */
		if (!suser())
			return -EPERM;
		if (ipcp->mode & SHM_LOCKED)
			return -EINVAL;
		ipcp->mode |= SHM_LOCKED;
		break;
	case IPC_STAT:		/* copy the segment descriptor to userspace */
		if (ipcperms (ipcp, S_IRUGO))
			return -EACCES;
		if (!buf)
			return -EFAULT;
		err = verify_area (VERIFY_WRITE, buf, sizeof (*buf));
		if (err)
			return err;
		tbuf.shm_perm = shp->shm_perm;
		tbuf.shm_segsz = shp->shm_segsz;
		tbuf.shm_atime = shp->shm_atime;
		tbuf.shm_dtime = shp->shm_dtime;
		tbuf.shm_ctime = shp->shm_ctime;
		tbuf.shm_cpid = shp->shm_cpid;
		tbuf.shm_lpid = shp->shm_lpid;
		tbuf.shm_nattch = shp->shm_nattch;
		memcpy_tofs (buf, &tbuf, sizeof(*buf));
		break;
	case IPC_SET:
		/* owner, creator or superuser may change uid/gid/mode bits */
		if (suser() || current->euid == shp->shm_perm.uid ||
		    current->euid == shp->shm_perm.cuid) {
			ipcp->uid = tbuf.shm_perm.uid;
			ipcp->gid = tbuf.shm_perm.gid;
			ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
				| (tbuf.shm_perm.mode & S_IRWXUGO);
			shp->shm_ctime = CURRENT_TIME;
			break;
		}
		return -EPERM;
	case IPC_RMID:
		/* mark for destruction; teardown waits for the last detach */
		if (suser() || current->euid == shp->shm_perm.uid ||
		    current->euid == shp->shm_perm.cuid) {
			shp->shm_perm.mode |= SHM_DEST;
			if (shp->shm_nattch <= 0)
				killseg (id);
			break;
		}
		return -EPERM;
	default:
		return -EINVAL;
	}
	return 0;
}
345
346
347
348
349
350
351
352
353
354
355
356
357
358
/*
 * VM callbacks for shared memory mappings.  Field order follows
 * struct vm_operations_struct in this kernel; the slots are presumably
 * open, close, unmap, protect, sync, advise, nopage, wppage, swapout,
 * swapin -- confirm against <linux/mm.h>.  Only open/close and the
 * final swap-in hook are needed for shm.
 */
static struct vm_operations_struct shm_vm_ops = {
	shm_open,		/* open */
	shm_close,		/* close */
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	shm_swap_in		/* fault/swap-in handler */
};
371
372
373
374
375
376
/*
 * Wire a segment attach into the page tables of shmd->vm_task.
 * Each pte of the range is filled with a fake "shm swap entry"
 * (segment id + page index) so the first access faults into
 * shm_swap_in().  When `remap' is clear the range must be empty;
 * otherwise any existing mapping is torn down first.
 * Returns 0 or a negative errno.
 */
static int shm_map (struct vm_area_struct *shmd, int remap)
{
	unsigned long *page_table;
	unsigned long tmp, shm_sgn;

	/* without SHM_REMAP, verify the target range has no pte's yet */
	if (!remap)
		for (tmp = shmd->vm_start; tmp < shmd->vm_end; tmp += PAGE_SIZE) {
			page_table = PAGE_DIR_OFFSET(shmd->vm_task,tmp);
			if (*page_table & PAGE_PRESENT) {
				page_table = (ulong *) (PAGE_MASK & *page_table);
				page_table += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
				if (*page_table) {
					/* something is already mapped here */
					return -EINVAL;
				}
			}
		}

	/* clear any old mappings in the range */
	do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);

	/* publish the new vma */
	insert_vm_struct(current, shmd);
	merge_segments(current->mm->mmap);

	/* ensure every address has a page table, allocating missing ones */
	for (tmp = shmd->vm_start; tmp < shmd->vm_end; tmp += PAGE_SIZE) {
		page_table = PAGE_DIR_OFFSET(shmd->vm_task,tmp);
		if (*page_table & PAGE_PRESENT) {
			page_table = (ulong *) (PAGE_MASK & *page_table);
			page_table += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
			if (*page_table) {
				/* drop whatever survived the munmap above */
				if (*page_table & PAGE_PRESENT) {
					--current->mm->rss;
					free_page (*page_table & PAGE_MASK);
				}
				else
					swap_free (*page_table);
				*page_table = 0;
			}
		} else {
			unsigned long new_pt;
			if (!(new_pt = get_free_page(GFP_KERNEL)))
				return -ENOMEM;
			*page_table = new_pt | PAGE_TABLE;
			/* jump tmp to the last page covered by this new table */
			tmp |= ((PAGE_SIZE << 10) - PAGE_SIZE);
		}
	}

	/* write the shm swap entries: vm_pte carries the id, add the index */
	shm_sgn = shmd->vm_pte + ((shmd->vm_offset >> PAGE_SHIFT) << SHM_IDX_SHIFT);
	for (tmp = shmd->vm_start; tmp < shmd->vm_end; tmp += PAGE_SIZE,
	     shm_sgn += (1 << SHM_IDX_SHIFT)) {
		page_table = PAGE_DIR_OFFSET(shmd->vm_task,tmp);
		page_table = (ulong *) (PAGE_MASK & *page_table);
		page_table += (tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
		*page_table = shm_sgn;	/* first touch faults into shm_swap_in() */
	}
	invalidate();	/* flush the TLB after rewriting pte's */
	return 0;
}
439
440
441
442
/*
 * shmat(2): attach segment `shmid' at `shmaddr' (or at a kernel-chosen
 * address when shmaddr is NULL).  On success the mapping address is
 * written through raddr and 0 is returned; otherwise a negative errno.
 */
int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_ds *shp;
	struct vm_area_struct *shmd;
	int err;
	unsigned int id;
	unsigned long addr;

	if (shmid < 0) {
		return -EINVAL;
	}

	shp = shm_segs[id = (unsigned int) shmid % SHMMNI];
	if (shp == IPC_UNUSED || shp == IPC_NOID) {
		return -EINVAL;
	}

	if (!(addr = (ulong) shmaddr)) {
		/* kernel picks the address; SHM_REMAP makes no sense then */
		if (shmflg & SHM_REMAP)
			return -EINVAL;
		if (!(addr = get_unmapped_area(shp->shm_segsz)))
			return -ENOMEM;
	} else if (addr & (SHMLBA-1)) {
		/* round down to an SHMLBA boundary with SHM_RND, else reject */
		if (shmflg & SHM_RND)
			addr &= ~(SHMLBA-1);
		else
			return -EINVAL;
	}
	/* keep the mapping clear of the stack (16k guard gap) */
	if ((addr > current->mm->start_stack - 16384 - PAGE_SIZE*shp->shm_npages)) {
		return -EINVAL;
	}
	/* without SHM_REMAP, refuse to overlap any existing mapping */
	if (!(shmflg & SHM_REMAP))
		for (shmd = current->mm->mmap; shmd; shmd = shmd->vm_next)
			if (!(addr >= shmd->vm_end || addr + shp->shm_segsz <= shmd->vm_start)) {
				return -EINVAL;
			}

	if (ipcperms(&shp->shm_perm, shmflg & SHM_RDONLY ? S_IRUGO : S_IRUGO|S_IWUGO))
		return -EACCES;
	if (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)
		return -EIDRM;	/* stale id */

	shmd = (struct vm_area_struct *) kmalloc (sizeof(*shmd), GFP_KERNEL);
	if (!shmd)
		return -ENOMEM;
	/* kmalloc may have slept: recheck the segment is still the same one */
	if ((shp != shm_segs[id]) || (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)) {
		kfree(shmd);
		return -EIDRM;
	}

	/* vm_pte encodes the segment id; shm_map() adds the per-page index */
	shmd->vm_pte = (SHM_SWP_TYPE << 1) | (id << SHM_ID_SHIFT);
	shmd->vm_start = addr;
	shmd->vm_end = addr + shp->shm_npages * PAGE_SIZE;
	shmd->vm_task = current;
	shmd->vm_page_prot = (shmflg & SHM_RDONLY) ? PAGE_READONLY : PAGE_SHARED;
	shmd->vm_flags = VM_SHM | VM_MAYSHARE | VM_SHARED
			 | VM_MAYREAD | VM_MAYEXEC | VM_READ | VM_EXEC
			 | ((shmflg & SHM_RDONLY) ? 0 : VM_MAYWRITE | VM_WRITE);
	shmd->vm_next_share = NULL;
	shmd->vm_inode = NULL;
	shmd->vm_offset = 0;
	shmd->vm_ops = &shm_vm_ops;

	shp->shm_nattch++;	/* prevents destruction while we map */
	if ((err = shm_map (shmd, shmflg & SHM_REMAP))) {
		/* undo the attach count; destroy if we were the last hope */
		if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
			killseg(id);
		kfree(shmd);
		return err;
	}

	/* success: link the attach and update accounting */
	shmd->vm_next_share = shp->attaches;
	shp->attaches = shmd;
	shp->shm_lpid = current->pid;
	shp->shm_atime = CURRENT_TIME;

	*raddr = addr;
	return 0;
}
527
528
529 static void shm_open (struct vm_area_struct *shmd)
530 {
531 unsigned int id;
532 struct shmid_ds *shp;
533
534 id = (shmd->vm_pte >> SHM_ID_SHIFT) & SHM_ID_MASK;
535 shp = shm_segs[id];
536 if (shp == IPC_UNUSED) {
537 printk("shm_open: unused id=%d PANIC\n", id);
538 return;
539 }
540 shmd->vm_next_share = shp->attaches;
541 shp->attaches = shmd;
542 shp->shm_nattch++;
543 shp->shm_atime = CURRENT_TIME;
544 shp->shm_lpid = current->pid;
545 }
546
547
548
549
550
551
552
/*
 * VM close callback: tear down the page range, unlink this vma from
 * the segment's attach list, and destroy the segment on the last
 * detach if it was marked SHM_DEST by IPC_RMID.
 */
static void shm_close (struct vm_area_struct *shmd)
{
	struct vm_area_struct **shmdp;
	struct shmid_ds *shp;
	int id;

	unmap_page_range (shmd->vm_start, shmd->vm_end - shmd->vm_start);

	/* remove from the segment's attach list */
	id = (shmd->vm_pte >> SHM_ID_SHIFT) & SHM_ID_MASK;
	shp = shm_segs[id];
	for (shmdp = &shp->attaches; *shmdp; shmdp = &(*shmdp)->vm_next_share)
		if (*shmdp == shmd) {
			*shmdp = shmd->vm_next_share;
			goto found;
		}
	/* not on the list: dump enough state to debug the inconsistency */
	printk("shm_close: shm segment (id=%d) attach list inconsistent\n",id);
	printk("shm_close: %d %08lx-%08lx %c%c%c%c %08lx %08lx\n",
		shmd->vm_task->pid, shmd->vm_start, shmd->vm_end,
		shmd->vm_flags & VM_READ ? 'r' : '-',
		shmd->vm_flags & VM_WRITE ? 'w' : '-',
		shmd->vm_flags & VM_EXEC ? 'x' : '-',
		shmd->vm_flags & VM_SHARED ? 's' : 'p',
		shmd->vm_offset, shmd->vm_pte);

found:
	shp->shm_lpid = current->pid;
	shp->shm_dtime = CURRENT_TIME;
	if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
		killseg (id);	/* last detach of a removed segment */
}
584
585
586
587
588
589 int sys_shmdt (char *shmaddr)
590 {
591 struct vm_area_struct *shmd, *shmdnext;
592
593 for (shmd = current->mm->mmap; shmd; shmd = shmdnext) {
594 shmdnext = shmd->vm_next;
595 if (shmd->vm_ops == &shm_vm_ops
596 && shmd->vm_start - shmd->vm_offset == (ulong) shmaddr)
597 do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);
598 }
599 return 0;
600 }
601
602
603
604
/*
 * Page-fault handler for shm mappings.  `code' is the fake pte written
 * by shm_map()/shm_swap(): it encodes the segment id and page index.
 * Validate both against the vma, bring the page into memory (reading
 * it back from swap if needed), and return the new pte value.
 * Returns BAD_PAGE | PAGE_SHARED on any inconsistency.
 */
static unsigned long shm_swap_in(struct vm_area_struct * shmd, unsigned long offset, unsigned long code)
{
	unsigned long page;
	struct shmid_ds *shp;
	unsigned int id, idx;

	id = (code >> SHM_ID_SHIFT) & SHM_ID_MASK;
	if (id != ((shmd->vm_pte >> SHM_ID_SHIFT) & SHM_ID_MASK)) {
		printk ("shm_swap_in: code id = %d and shmd id = %ld differ\n",
			id, (shmd->vm_pte >> SHM_ID_SHIFT) & SHM_ID_MASK);
		return BAD_PAGE | PAGE_SHARED;
	}
	if (id > max_shmid) {
		printk ("shm_swap_in: id=%d too big. proc mem corrupted\n", id);
		return BAD_PAGE | PAGE_SHARED;
	}
	shp = shm_segs[id];
	if (shp == IPC_UNUSED || shp == IPC_NOID) {
		printk ("shm_swap_in: id=%d invalid. Race.\n", id);
		return BAD_PAGE | PAGE_SHARED;
	}
	idx = (code >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
	if (idx != (offset >> PAGE_SHIFT)) {
		printk ("shm_swap_in: code idx = %u and shmd idx = %lu differ\n",
			idx, offset >> PAGE_SHIFT);
		return BAD_PAGE | PAGE_SHARED;
	}
	if (idx >= shp->shm_npages) {
		printk ("shm_swap_in : too large page index. id=%d\n", id);
		return BAD_PAGE | PAGE_SHARED;
	}

	if (!(shp->shm_pages[idx] & PAGE_PRESENT)) {
		/* not resident: allocate a frame; the allocation can sleep,
		   so re-check for a concurrent fault having filled it in */
		if (!(page = get_free_page(GFP_KERNEL))) {
			oom(current);
			return BAD_PAGE | PAGE_SHARED;
		}
		if (shp->shm_pages[idx] & PAGE_PRESENT) {
			free_page (page);	/* someone else won the race */
			goto done;
		}
		if (shp->shm_pages[idx]) {
			/* a swap entry exists: read the page back in */
			read_swap_page (shp->shm_pages[idx], (char *) page);
			if (shp->shm_pages[idx] & PAGE_PRESENT) {
				free_page (page);	/* raced during the read */
				goto done;
			}
			swap_free (shp->shm_pages[idx]);
			shm_swp--;
		}
		shm_rss++;
		shp->shm_pages[idx] = page | (PAGE_SHARED | PAGE_DIRTY);
	} else
		--current->mm->maj_flt;	/* already resident: not a major fault */

done:
	current->mm->min_flt++;
	page = shp->shm_pages[idx];
	page &= ~(PAGE_RW & ~shmd->vm_page_prot);	/* strip write bit for RO attaches */
	mem_map[MAP_NR(page)]++;	/* one more mapping of this frame */
	return page;
}
667
668
669
670
/* round-robin cursor for shm_swap(): segment slot and page index to try next */
static unsigned long swap_id = 0;
static unsigned long swap_idx = 0;
673
/*
 * Try to push one shared memory page out to swap.  `prio' scales the
 * scan budget (counter = shm_rss >> prio).  Scans segments and pages
 * round-robin from (swap_id, swap_idx), unmaps the victim page from
 * every attach whose ACCESSED bit is clear, and writes it to a
 * pre-allocated swap slot.  Returns 1 if a page was swapped out,
 * 0 otherwise.
 */
int shm_swap (int prio)
{
	unsigned long page;
	struct shmid_ds *shp;
	struct vm_area_struct *shmd;
	unsigned int swap_nr;
	unsigned long id, idx, invalid = 0;
	int counter;

	counter = shm_rss >> prio;
	if (!counter || !(swap_nr = get_swap_page()))
		return 0;

check_id:
	/* skip empty, half-built, and SHM_LOCKED segments */
	shp = shm_segs[swap_id];
	if (shp == IPC_UNUSED || shp == IPC_NOID || shp->shm_perm.mode & SHM_LOCKED ) {
		swap_idx = 0;
		if (++swap_id > max_shmid)
			swap_id = 0;
		goto check_id;
	}
	id = swap_id;

check_table:
	idx = swap_idx++;
	if (idx >= shp->shm_npages) {
		/* exhausted this segment: advance to the next slot */
		swap_idx = 0;
		if (++swap_id > max_shmid)
			swap_id = 0;
		goto check_id;
	}

	page = shp->shm_pages[idx];
	if (!(page & PAGE_PRESENT))
		goto check_table;	/* not resident: nothing to swap */
	swap_attempts++;

	if (--counter < 0) {	/* scan budget exhausted: give up */
		if (invalid)
			invalidate();
		swap_free (swap_nr);	/* return the unused swap slot */
		return 0;
	}
	/* unmap the candidate page from every process that has it attached */
	for (shmd = shp->attaches; shmd; shmd = shmd->vm_next_share) {
		unsigned long tmp, *pte;
		if ((shmd->vm_pte >> SHM_ID_SHIFT & SHM_ID_MASK) != id) {
			printk ("shm_swap: id=%ld does not match shmd->vm_pte.id=%ld\n", id, shmd->vm_pte >> SHM_ID_SHIFT & SHM_ID_MASK);
			continue;
		}
		tmp = shmd->vm_start + (idx << PAGE_SHIFT) - shmd->vm_offset;
		if (!(tmp >= shmd->vm_start && tmp < shmd->vm_end))
			continue;	/* page lies outside this attach's range */
		pte = PAGE_DIR_OFFSET(shmd->vm_task,tmp);
		if (!(*pte & PAGE_PRESENT)) {
			printk("shm_swap: bad pgtbl! id=%ld start=%lx idx=%ld\n",
				id, shmd->vm_start, idx);
			*pte = 0;
			continue;
		}
		pte = (ulong *) (PAGE_MASK & *pte);
		pte += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
		tmp = *pte;
		if (!(tmp & PAGE_PRESENT))
			continue;
		if (tmp & PAGE_ACCESSED) {
			/* recently touched: age it instead of swapping */
			*pte &= ~PAGE_ACCESSED;
			continue;
		}
		/* replace the pte with the shm swap entry for later fault-in */
		*pte = shmd->vm_pte | idx << SHM_IDX_SHIFT;
		mem_map[MAP_NR(page)]--;
		shmd->vm_task->mm->rss--;
		invalid++;
	}

	/* proceed only when our bookkeeping reference is the last user */
	if (mem_map[MAP_NR(page)] != 1)
		goto check_table;
	page &= PAGE_MASK;
	shp->shm_pages[idx] = swap_nr;	/* record where the page now lives */
	if (invalid)
		invalidate();	/* flush TLBs before the frame is reused */
	write_swap_page (swap_nr, (char *) page);
	free_page (page);
	swap_successes++;
	shm_swp++;
	shm_rss--;
	return 1;
}
760 }