This source file includes the following definitions:
- shm_init
- findkey
- newseg
- sys_shmget
- killseg
- sys_shmctl
- shm_map
- sys_shmat
- shm_open
- shm_close
- sys_shmdt
- shm_swap_in
- shm_swap
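
Together these implement the System V shared-memory system calls. For orientation, a minimal user-space sketch (not part of this file; the key, size, and flags are arbitrary illustrative values) exercises the call sequence like so:

/* user-space illustration only -- built against <sys/shm.h>, not this file */
#include <sys/ipc.h>
#include <sys/shm.h>
#include <string.h>

int main (void)
{
        char *p;
        /* sys_shmget: create (or look up) a one-page segment, mode 0600 */
        int id = shmget ((key_t) 0x1234, 4096, IPC_CREAT | 0600);

        if (id < 0)
                return 1;
        /* sys_shmat: attach at an address chosen by the kernel */
        p = (char *) shmat (id, (char *) 0, 0);
        if (p == (char *) -1)
                return 1;
        strcpy (p, "hello");    /* first touch faults the page in (shm_swap_in) */
        shmdt (p);              /* sys_shmdt -> shm_close detaches */
        shmctl (id, IPC_RMID, (struct shmid_ds *) 0);   /* destroy the segment */
        return 0;
}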

#include <linux/errno.h>
#include <asm/segment.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/ipc.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/malloc.h>

extern int ipcperms (struct ipc_perm *ipcp, short shmflg);
extern unsigned int get_swap_page (void);
static int findkey (key_t key);
static int newseg (key_t key, int shmflg, int size);
static int shm_map (struct vm_area_struct *shmd, int remap);
static void killseg (int id);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
static unsigned long shm_swap_in (struct vm_area_struct *, unsigned long);

static int shm_tot = 0;                 /* total number of shared memory pages */
static int shm_rss = 0;                 /* number of shared memory pages that are in memory */
static int shm_swp = 0;                 /* number of shared memory pages that are in swap */
static int max_shmid = 0;               /* every used id is <= max_shmid */
static struct wait_queue *shm_lock = NULL;      /* calling findkey() may need to wait */
static struct shmid_ds *shm_segs[SHMMNI];

static unsigned short shm_seq = 0;      /* incremented, for recognizing stale ids */

/* some statistics */
static ulong swap_attempts = 0;
static ulong swap_successes = 0;
static ulong used_segs = 0;
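
/* Called once at system start (from ipc_init()): mark every segment slot
   unused and clear the global counters. */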
void shm_init (void)
{
        int id;

        for (id = 0; id < SHMMNI; id++)
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
        shm_tot = shm_rss = shm_seq = max_shmid = used_segs = 0;
        shm_lock = NULL;
        return;
}
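
/* Return the slot index of the segment with the given key, or -1 if none.
   Sleeps on shm_lock while a slot is still being set up (IPC_NOID). */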
static int findkey (key_t key)
{
        int id;
        struct shmid_ds *shp;

        for (id = 0; id <= max_shmid; id++) {
                while ((shp = shm_segs[id]) == IPC_NOID)
                        sleep_on (&shm_lock);
                if (shp == IPC_UNUSED)
                        continue;
                if (key == shp->shm_perm.key)
                        return id;
        }
        return -1;
}

/*
 * allocate new shmid_ds and pgtable. protected by shm_segs[id] = NOID.
 */
static int newseg (key_t key, int shmflg, int size)
{
        struct shmid_ds *shp;
        int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int id, i;

        if (size < SHMMIN)
                return -EINVAL;
        if (shm_tot + numpages >= SHMALL)
                return -ENOSPC;
        for (id = 0; id < SHMMNI; id++)
                if (shm_segs[id] == IPC_UNUSED) {
                        shm_segs[id] = (struct shmid_ds *) IPC_NOID;
                        goto found;
                }
        return -ENOSPC;

found:
        shp = (struct shmid_ds *) kmalloc (sizeof (*shp), GFP_KERNEL);
        if (!shp) {
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
                if (shm_lock)
                        wake_up (&shm_lock);
                return -ENOMEM;
        }

        shp->shm_pages = (ulong *) kmalloc (numpages*sizeof(ulong), GFP_KERNEL);
        if (!shp->shm_pages) {
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
                if (shm_lock)
                        wake_up (&shm_lock);
                kfree(shp);
                return -ENOMEM;
        }

        for (i = 0; i < numpages; shp->shm_pages[i++] = 0);
        shm_tot += numpages;
        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->shm_perm.cuid = shp->shm_perm.uid = current->euid;
        shp->shm_perm.cgid = shp->shm_perm.gid = current->egid;
        shp->shm_perm.seq = shm_seq;
        shp->shm_segsz = size;
        shp->shm_cpid = current->pid;
        shp->attaches = NULL;
        shp->shm_lpid = shp->shm_nattch = 0;
        shp->shm_atime = shp->shm_dtime = 0;
        shp->shm_ctime = CURRENT_TIME;
        shp->shm_npages = numpages;

        if (id > max_shmid)
                max_shmid = id;
        shm_segs[id] = shp;
        used_segs++;
        if (shm_lock)
                wake_up (&shm_lock);
        return (unsigned int) shp->shm_perm.seq * SHMMNI + id;
}
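
/*
 * shmget() system call: find a segment by key or create a new one
 * (IPC_PRIVATE always creates).  The returned id encodes both the slot
 * and the sequence number (seq * SHMMNI + slot), so stale ids left over
 * from a recycled slot are detectable.
 */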
int sys_shmget (key_t key, int size, int shmflg)
{
        struct shmid_ds *shp;
        int id = 0;

        if (size < 0 || size > SHMMAX)
                return -EINVAL;
        if (key == IPC_PRIVATE)
                return newseg(key, shmflg, size);
        if ((id = findkey (key)) == -1) {
                if (!(shmflg & IPC_CREAT))
                        return -ENOENT;
                return newseg(key, shmflg, size);
        }
        if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL))
                return -EEXIST;
        shp = shm_segs[id];
        if (shp->shm_perm.mode & SHM_DEST)
                return -EIDRM;
        if (size > shp->shm_segsz)
                return -EINVAL;
        if (ipcperms (&shp->shm_perm, shmflg))
                return -EACCES;
        return (unsigned int) shp->shm_perm.seq * SHMMNI + id;
}

/*
 * Only called after testing nattch and SHM_DEST.
 * Here pages, pgtable and shmid_ds are freed.
 */
static void killseg (int id)
{
        struct shmid_ds *shp;
        int i, numpages;
        ulong page;

        shp = shm_segs[id];
        if (shp == IPC_NOID || shp == IPC_UNUSED) {
                printk ("shm nono: killseg called on unused seg id=%d\n", id);
                return;
        }
        shp->shm_perm.seq++;    /* for shmat */
        shm_seq = (shm_seq+1) % ((unsigned)(1<<31)/SHMMNI);     /* increment, but avoid overflow */
        shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
        used_segs--;
        if (id == max_shmid)
                while (max_shmid && (shm_segs[--max_shmid] == IPC_UNUSED));
        if (!shp->shm_pages) {
                printk ("shm nono: killseg shp->pages=NULL. id=%d\n", id);
                return;
        }
        numpages = shp->shm_npages;
        for (i = 0; i < numpages; i++) {
                if (!(page = shp->shm_pages[i]))
                        continue;
                if (page & PAGE_PRESENT) {
                        free_page (page & PAGE_MASK);
                        shm_rss--;
                } else {
                        swap_free (page);
                        shm_swp--;
                }
        }
        kfree(shp->shm_pages);
        shm_tot -= numpages;
        kfree(shp);
        return;
}
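
/*
 * shmctl() system call: IPC_INFO/SHM_INFO/SHM_STAT queries, IPC_STAT,
 * SHM_LOCK/SHM_UNLOCK (superuser only), IPC_SET and IPC_RMID.
 */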
int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
{
        struct shmid_ds tbuf;
        struct shmid_ds *shp;
        struct ipc_perm *ipcp;
        int id, err;

        if (cmd < 0 || shmid < 0)
                return -EINVAL;
        if (cmd == IPC_SET) {
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_READ, buf, sizeof (*buf));
                if (err)
                        return err;
                memcpy_fromfs (&tbuf, buf, sizeof (*buf));
        }

        switch (cmd) {
        case IPC_INFO:
        {
                struct shminfo shminfo;
                if (!buf)
                        return -EFAULT;
                shminfo.shmmni = SHMMNI;
                shminfo.shmmax = SHMMAX;
                shminfo.shmmin = SHMMIN;
                shminfo.shmall = SHMALL;
                shminfo.shmseg = SHMSEG;
                err = verify_area (VERIFY_WRITE, buf, sizeof (struct shminfo));
                if (err)
                        return err;
                memcpy_tofs (buf, &shminfo, sizeof(struct shminfo));
                return max_shmid;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (shm_info));
                if (err)
                        return err;
                shm_info.used_ids = used_segs;
                shm_info.shm_rss = shm_rss;
                shm_info.shm_tot = shm_tot;
                shm_info.shm_swp = shm_swp;
                shm_info.swap_attempts = swap_attempts;
                shm_info.swap_successes = swap_successes;
                memcpy_tofs (buf, &shm_info, sizeof(shm_info));
                return max_shmid;
        }
        case SHM_STAT:
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (*buf));
                if (err)
                        return err;
                if (shmid > max_shmid)
                        return -EINVAL;
                shp = shm_segs[shmid];
                if (shp == IPC_UNUSED || shp == IPC_NOID)
                        return -EINVAL;
                if (ipcperms (&shp->shm_perm, S_IRUGO))
                        return -EACCES;
                id = (unsigned int) shp->shm_perm.seq * SHMMNI + shmid;
                tbuf.shm_perm = shp->shm_perm;
                tbuf.shm_segsz = shp->shm_segsz;
                tbuf.shm_atime = shp->shm_atime;
                tbuf.shm_dtime = shp->shm_dtime;
                tbuf.shm_ctime = shp->shm_ctime;
                tbuf.shm_cpid = shp->shm_cpid;
                tbuf.shm_lpid = shp->shm_lpid;
                tbuf.shm_nattch = shp->shm_nattch;
                memcpy_tofs (buf, &tbuf, sizeof(*buf));
                return id;
        }

        shp = shm_segs[id = (unsigned int) shmid % SHMMNI];
        if (shp == IPC_UNUSED || shp == IPC_NOID)
                return -EINVAL;
        if (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)
                return -EIDRM;
        ipcp = &shp->shm_perm;

        switch (cmd) {
        case SHM_UNLOCK:
                if (!suser())
                        return -EPERM;
                if (!(ipcp->mode & SHM_LOCKED))
                        return -EINVAL;
                ipcp->mode &= ~SHM_LOCKED;
                break;
        case SHM_LOCK:
                /* Allow superuser to lock segment in memory */
                if (!suser())
                        return -EPERM;
                if (ipcp->mode & SHM_LOCKED)
                        return -EINVAL;
                ipcp->mode |= SHM_LOCKED;
                break;
        case IPC_STAT:
                if (ipcperms (ipcp, S_IRUGO))
                        return -EACCES;
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (*buf));
                if (err)
                        return err;
                tbuf.shm_perm = shp->shm_perm;
                tbuf.shm_segsz = shp->shm_segsz;
                tbuf.shm_atime = shp->shm_atime;
                tbuf.shm_dtime = shp->shm_dtime;
                tbuf.shm_ctime = shp->shm_ctime;
                tbuf.shm_cpid = shp->shm_cpid;
                tbuf.shm_lpid = shp->shm_lpid;
                tbuf.shm_nattch = shp->shm_nattch;
                memcpy_tofs (buf, &tbuf, sizeof(*buf));
                break;
        case IPC_SET:
                if (suser() || current->euid == shp->shm_perm.uid ||
                    current->euid == shp->shm_perm.cuid) {
                        ipcp->uid = tbuf.shm_perm.uid;
                        ipcp->gid = tbuf.shm_perm.gid;
                        ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
                                | (tbuf.shm_perm.mode & S_IRWXUGO);
                        shp->shm_ctime = CURRENT_TIME;
                        break;
                }
                return -EPERM;
        case IPC_RMID:
                if (suser() || current->euid == shp->shm_perm.uid ||
                    current->euid == shp->shm_perm.cuid) {
                        shp->shm_perm.mode |= SHM_DEST;
                        if (shp->shm_nattch <= 0)
                                killseg (id);
                        break;
                }
                return -EPERM;
        default:
                return -EINVAL;
        }
        return 0;
}

/*
 * The per process internal structure for managing segments is
 * `struct vm_area_struct'.
 * A shmat will add to and shmdt will remove from the list.
 * shmd->vm_task           the attacher
 * shmd->vm_start          virt addr of attach, multiple of SHMLBA
 * shmd->vm_end            multiple of SHMLBA
 * shmd->vm_next           next attach for task
 * shmd->vm_next_share     next attach for segment
 * shmd->vm_offset         offset into segment
 * shmd->vm_pte            signature for this attach
 */

static struct vm_operations_struct shm_vm_ops = {
        shm_open,               /* open */
        shm_close,              /* close */
        NULL,                   /* unmap */
        NULL,                   /* protect */
        NULL,                   /* sync */
        NULL,                   /* advise */
        NULL,                   /* nopage (done with swapin) */
        NULL,                   /* wppage */
        NULL,                   /* swapout (done with swapin) */
        shm_swap_in             /* swapin */
};
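
/*
 * Attached ranges are filled with non-present "signature" ptes built from
 * shmd->vm_pte: they encode SHM_SWP_TYPE, the segment id, the page index
 * and the read-only bit, and shm_swap_in() decodes them at fault time.
 */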

/*
 * check range is unmapped, ensure page tables exist
 * mark page table entries with shm_sgn.
 * if remap != 0 the range is remapped.
 */
static int shm_map (struct vm_area_struct *shmd, int remap)
{
        unsigned long *page_table;
        unsigned long tmp, shm_sgn;
        unsigned long page_dir = shmd->vm_task->tss.cr3;

        /* check that the range is unmapped */
        if (!remap)
                for (tmp = shmd->vm_start; tmp < shmd->vm_end; tmp += PAGE_SIZE) {
                        page_table = PAGE_DIR_OFFSET(page_dir,tmp);
                        if (*page_table & PAGE_PRESENT) {
                                page_table = (ulong *) (PAGE_MASK & *page_table);
                                page_table += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
                                if (*page_table)
                                        return -EINVAL;
                        }
                }

        /* clear old mappings */
        do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);

        /* add new mapping */
        insert_vm_struct(current, shmd);
        merge_segments(current->mm->mmap);

        /* check that the range has page tables */
        for (tmp = shmd->vm_start; tmp < shmd->vm_end; tmp += PAGE_SIZE) {
                page_table = PAGE_DIR_OFFSET(page_dir,tmp);
                if (*page_table & PAGE_PRESENT) {
                        page_table = (ulong *) (PAGE_MASK & *page_table);
                        page_table += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
                        if (*page_table) {
                                if (*page_table & PAGE_PRESENT) {
                                        --current->mm->rss;
                                        free_page (*page_table & PAGE_MASK);
                                }
                                else
                                        swap_free (*page_table);
                                *page_table = 0;
                        }
                } else {
                        unsigned long new_pt;
                        if (!(new_pt = get_free_page(GFP_KERNEL)))
                                return -ENOMEM;
                        *page_table = new_pt | PAGE_TABLE;
                        /* the new page table is empty: skip to its last page */
                        tmp |= ((PAGE_SIZE << 10) - PAGE_SIZE);
                }
        }

        /* map page range: install the shm signature ptes */
        shm_sgn = shmd->vm_pte + ((shmd->vm_offset >> PAGE_SHIFT) << SHM_IDX_SHIFT);
        for (tmp = shmd->vm_start; tmp < shmd->vm_end; tmp += PAGE_SIZE,
             shm_sgn += (1 << SHM_IDX_SHIFT)) {
                page_table = PAGE_DIR_OFFSET(page_dir,tmp);
                page_table = (ulong *) (PAGE_MASK & *page_table);
                page_table += (tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
                *page_table = shm_sgn;
        }
        invalidate();
        return 0;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor.
 * raddr is needed to return addresses above 2Gig.
 */
int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
{
        struct shmid_ds *shp;
        struct vm_area_struct *shmd;
        int err;
        unsigned int id;
        unsigned long addr;

        if (shmid < 0)
                return -EINVAL;

        shp = shm_segs[id = (unsigned int) shmid % SHMMNI];
        if (shp == IPC_UNUSED || shp == IPC_NOID)
                return -EINVAL;

        if (!(addr = (ulong) shmaddr)) {
                if (shmflg & SHM_REMAP)
                        return -EINVAL;
                if (!(addr = get_unmapped_area(shp->shm_segsz)))
                        return -ENOMEM;
        } else if (addr & (SHMLBA-1)) {
                if (shmflg & SHM_RND)
                        addr &= ~(SHMLBA-1);    /* round down to SHMLBA boundary */
                else
                        return -EINVAL;
        }
        /* the segment must not reach into the stack area */
        if (addr > current->mm->start_stack - 16384 - PAGE_SIZE*shp->shm_npages)
                return -EINVAL;
        /* without SHM_REMAP the range must not overlap an existing mapping */
        if (!(shmflg & SHM_REMAP))
                for (shmd = current->mm->mmap; shmd; shmd = shmd->vm_next)
                        if (!(addr >= shmd->vm_end || addr + shp->shm_segsz <= shmd->vm_start))
                                return -EINVAL;

        if (ipcperms(&shp->shm_perm, shmflg & SHM_RDONLY ? S_IRUGO : S_IRUGO|S_IWUGO))
                return -EACCES;
        if (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)
                return -EIDRM;

        shmd = (struct vm_area_struct *) kmalloc (sizeof(*shmd), GFP_KERNEL);
        if (!shmd)
                return -ENOMEM;
        /* kmalloc may have slept: recheck that the segment still exists */
        if ((shp != shm_segs[id]) || (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)) {
                kfree(shmd);
                return -EIDRM;
        }

        shmd->vm_pte = (SHM_SWP_TYPE << 1) | (id << SHM_ID_SHIFT) |
                (shmflg & SHM_RDONLY ? SHM_READ_ONLY : 0);
        shmd->vm_start = addr;
        shmd->vm_end = addr + shp->shm_npages * PAGE_SIZE;
        shmd->vm_task = current;
        shmd->vm_page_prot = (shmflg & SHM_RDONLY) ? PAGE_READONLY : PAGE_SHARED;
        shmd->vm_flags = VM_SHM | VM_MAYSHARE | VM_SHARED
                         | VM_MAYREAD | VM_MAYEXEC | VM_READ | VM_EXEC
                         | ((shmflg & SHM_RDONLY) ? 0 : VM_MAYWRITE | VM_WRITE);
        shmd->vm_next_share = NULL;
        shmd->vm_inode = NULL;
        shmd->vm_offset = 0;
        shmd->vm_ops = &shm_vm_ops;

        shp->shm_nattch++;      /* prevent destruction */
        if ((err = shm_map (shmd, shmflg & SHM_REMAP))) {
                if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
                        killseg(id);
                kfree(shmd);
                return err;
        }

        shmd->vm_next_share = shp->attaches;    /* insert in the attach list */
        shp->attaches = shmd;
        shp->shm_lpid = current->pid;
        shp->shm_atime = CURRENT_TIME;

        *raddr = addr;
        return 0;
}

/* This is called by fork, once for every shm attach. */
static void shm_open (struct vm_area_struct *shmd)
{
        unsigned int id;
        struct shmid_ds *shp;

        id = (shmd->vm_pte >> SHM_ID_SHIFT) & SHM_ID_MASK;
        shp = shm_segs[id];
        if (shp == IPC_UNUSED) {
                printk("shm_open: unused id=%d PANIC\n", id);
                return;
        }
        shmd->vm_next_share = shp->attaches;    /* insert in the attach list */
        shp->attaches = shmd;
        shp->shm_nattch++;
        shp->shm_atime = CURRENT_TIME;
        shp->shm_lpid = current->pid;
}

/*
 * remove the attach descriptor shmd.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close (struct vm_area_struct *shmd)
{
        struct vm_area_struct **shmdp;
        struct shmid_ds *shp;
        int id;

        unmap_page_range (shmd->vm_start, shmd->vm_end - shmd->vm_start);

        /* remove from the list of attaches of the shm segment */
        id = (shmd->vm_pte >> SHM_ID_SHIFT) & SHM_ID_MASK;
        shp = shm_segs[id];
        for (shmdp = &shp->attaches; *shmdp; shmdp = &(*shmdp)->vm_next_share)
                if (*shmdp == shmd) {
                        *shmdp = shmd->vm_next_share;
                        goto found;
                }
        printk("shm_close: shm segment (id=%d) attach list inconsistent\n", id);
        printk("shm_close: %d %08lx-%08lx %c%c%c%c %08lx %08lx\n",
               shmd->vm_task->pid, shmd->vm_start, shmd->vm_end,
               shmd->vm_flags & VM_READ ? 'r' : '-',
               shmd->vm_flags & VM_WRITE ? 'w' : '-',
               shmd->vm_flags & VM_EXEC ? 'x' : '-',
               shmd->vm_flags & VM_SHARED ? 's' : 'p',
               shmd->vm_offset, shmd->vm_pte);

found:
        shp->shm_lpid = current->pid;
        shp->shm_dtime = CURRENT_TIME;
        if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
                killseg (id);
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
int sys_shmdt (char *shmaddr)
{
        struct vm_area_struct *shmd, *shmdnext;

        for (shmd = current->mm->mmap; shmd; shmd = shmdnext) {
                shmdnext = shmd->vm_next;
                if (shmd->vm_ops == &shm_vm_ops
                    && shmd->vm_start - shmd->vm_offset == (ulong) shmaddr)
                        do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);
        }
        return 0;
}

/*
 * Fault handler, reached through shm_vm_ops.swapin when a signature pte is
 * hit: decode the segment id and page index and return the page, swapping
 * it in first if necessary.
 */
static unsigned long shm_swap_in(struct vm_area_struct * vma, unsigned long code)
{
        unsigned long page;
        struct shmid_ds *shp;
        unsigned int id, idx;

        id = (code >> SHM_ID_SHIFT) & SHM_ID_MASK;
        if (id > max_shmid) {
                printk ("shm_no_page: id=%d too big. proc mem corrupted\n", id);
                return BAD_PAGE | PAGE_SHARED;
        }
        shp = shm_segs[id];
        if (shp == IPC_UNUSED || shp == IPC_NOID) {
                printk ("shm_no_page: id=%d invalid. Race.\n", id);
                return BAD_PAGE | PAGE_SHARED;
        }
        idx = (code >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
        if (idx >= shp->shm_npages) {
                printk ("shm_no_page: too large page index. id=%d\n", id);
                return BAD_PAGE | PAGE_SHARED;
        }

        if (!(shp->shm_pages[idx] & PAGE_PRESENT)) {
                if (!(page = get_free_page(GFP_KERNEL))) {
                        oom(current);
                        return BAD_PAGE | PAGE_SHARED;
                }
                /* get_free_page may have slept: did someone else fault it in? */
                if (shp->shm_pages[idx] & PAGE_PRESENT) {
                        free_page (page);
                        goto done;
                }
                if (shp->shm_pages[idx]) {
                        /* the page is on swap: read it in */
                        read_swap_page (shp->shm_pages[idx], (char *) page);
                        if (shp->shm_pages[idx] & PAGE_PRESENT) {
                                free_page (page);
                                goto done;
                        }
                        swap_free (shp->shm_pages[idx]);
                        shm_swp--;
                }
                shm_rss++;
                shp->shm_pages[idx] = page | (PAGE_SHARED | PAGE_DIRTY);
        } else
                --current->mm->maj_flt; /* page was present: only a minor fault */

done:
        current->mm->min_flt++;
        page = shp->shm_pages[idx];
        if (code & SHM_READ_ONLY)       /* write-protect */
                page &= ~PAGE_RW;
        mem_map[MAP_NR(page)]++;
        return page;
}

/*
 * Goes through counter = (shm_rss >> prio) present shm pages.
 */
static unsigned long swap_id = 0;       /* currently being swapped */
static unsigned long swap_idx = 0;      /* next to swap */

int shm_swap (int prio)
{
        unsigned long page;
        struct shmid_ds *shp;
        struct vm_area_struct *shmd;
        unsigned int swap_nr;
        unsigned long id, idx, invalid = 0;
        int counter;

        counter = shm_rss >> prio;
        if (!counter || !(swap_nr = get_swap_page()))
                return 0;

check_id:
        shp = shm_segs[swap_id];
        if (shp == IPC_UNUSED || shp == IPC_NOID || shp->shm_perm.mode & SHM_LOCKED) {
                swap_idx = 0;
                if (++swap_id > max_shmid)
                        swap_id = 0;
                goto check_id;
        }
        id = swap_id;

check_table:
        idx = swap_idx++;
        if (idx >= shp->shm_npages) {
                swap_idx = 0;
                if (++swap_id > max_shmid)
                        swap_id = 0;
                goto check_id;
        }

        page = shp->shm_pages[idx];
        if (!(page & PAGE_PRESENT))
                goto check_table;
        swap_attempts++;

        if (--counter < 0) {    /* budget for this pass exhausted: give up */
                if (invalid)
                        invalidate();
                swap_free (swap_nr);
                return 0;
        }
        /* replace this page with its signature pte in every attach */
        for (shmd = shp->attaches; shmd; shmd = shmd->vm_next_share) {
                unsigned long tmp, *pte;
                if ((shmd->vm_pte >> SHM_ID_SHIFT & SHM_ID_MASK) != id) {
                        printk ("shm_swap: id=%ld does not match shmd->vm_pte.id=%ld\n", id, shmd->vm_pte >> SHM_ID_SHIFT & SHM_ID_MASK);
                        continue;
                }
                tmp = shmd->vm_start + (idx << PAGE_SHIFT) - shmd->vm_offset;
                if (!(tmp >= shmd->vm_start && tmp < shmd->vm_end))
                        continue;
                pte = PAGE_DIR_OFFSET(shmd->vm_task->tss.cr3,tmp);
                if (!(*pte & PAGE_PRESENT)) {
                        printk("shm_swap: bad pgtbl! id=%ld start=%lx idx=%ld\n",
                               id, shmd->vm_start, idx);
                        *pte = 0;
                        continue;
                }
                pte = (ulong *) (PAGE_MASK & *pte);
                pte += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
                tmp = *pte;
                if (!(tmp & PAGE_PRESENT))
                        continue;
                if (tmp & PAGE_ACCESSED) {      /* recently referenced: spare it */
                        *pte &= ~PAGE_ACCESSED;
                        continue;
                }
                tmp = shmd->vm_pte | idx << SHM_IDX_SHIFT;      /* back to signature */
                *pte = tmp;
                mem_map[MAP_NR(page)]--;
                shmd->vm_task->mm->rss--;
                invalid++;
        }

        /* swap out only if no attach still maps the page */
        if (mem_map[MAP_NR(page)] != 1)
                goto check_table;
        page &= PAGE_MASK;
        shp->shm_pages[idx] = swap_nr;
        if (invalid)
                invalidate();
        write_swap_page (swap_nr, (char *) page);
        free_page (page);
        swap_successes++;
        shm_swp++;
        shm_rss--;
        return 1;
}