This source file includes the following definitions:
- shm_init
- findkey
- newseg
- sys_shmget
- killseg
- sys_shmctl
- shm_map
- add_vm_area
- sys_shmat
- detach
- sys_shmdt
- shm_exit
- shm_fork
- shm_swap_in
- shm_swap
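
For orientation, here is a minimal user-space sketch (our illustration, not
part of the file; it assumes the usual libc wrappers declared in <sys/ipc.h>
and <sys/shm.h>, and omits error handling) of the call sequence the handlers
below serve:

	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	char *p = (char *) shmat(id, 0, 0);	/* enters sys_shmat */
	p[0] = 'x';			/* signature pte faults into shm_swap_in */
	shmdt(p);			/* enters sys_shmdt, then detach */
	shmctl(id, IPC_RMID, 0);	/* enters sys_shmctl, then killseg */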
/*
 * linux/ipc/shm.c
 */

#include <linux/errno.h>
#include <asm/segment.h>
#include <linux/sched.h>
#include <linux/ipc.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/malloc.h>

extern int ipcperms (struct ipc_perm *ipcp, short shmflg);
extern unsigned int get_swap_page(void);
static int findkey (key_t key);
static int newseg (key_t key, int shmflg, int size);
static int shm_map (struct shm_desc *shmd, int remap);
static void killseg (int id);
static unsigned long shm_swap_in(struct vm_area_struct *, unsigned long);

static int shm_tot = 0;	/* total number of shared memory pages */
static int shm_rss = 0;	/* number of shared memory pages that are in memory */
static int shm_swp = 0;	/* number of shared memory pages that are in swap */
static int max_shmid = 0;	/* every used id is <= max_shmid */
static struct wait_queue *shm_lock = NULL;	/* for findkey, while a slot is IPC_NOID */
static struct shmid_ds *shm_segs[SHMMNI];

static unsigned short shm_seq = 0;	/* incremented, for recognizing stale ids */

/* some statistics */
static ulong swap_attempts = 0;
static ulong swap_successes = 0;
static ulong used_segs = 0;

void shm_init (void)
{
	int id;

	for (id = 0; id < SHMMNI; id++)
		shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
	shm_tot = shm_rss = shm_seq = max_shmid = used_segs = 0;
	shm_lock = NULL;
	return;
}

static int findkey (key_t key)
{
	int id;
	struct shmid_ds *shp;

	for (id = 0; id <= max_shmid; id++) {
		while ((shp = shm_segs[id]) == IPC_NOID)
			sleep_on (&shm_lock);	/* slot is being set up by newseg */
		if (shp == IPC_UNUSED)
			continue;
		if (key == shp->shm_perm.key)
			return id;
	}
	return -1;
}

/*
 * allocate new shmid_ds and page array.  the slot is protected
 * against concurrent use by shm_segs[id] = IPC_NOID.
 */
static int newseg (key_t key, int shmflg, int size)
{
	struct shmid_ds *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int id, i;

	if (size < SHMMIN)
		return -EINVAL;
	if (shm_tot + numpages >= SHMALL)
		return -ENOSPC;
	for (id = 0; id < SHMMNI; id++)
		if (shm_segs[id] == IPC_UNUSED) {
			shm_segs[id] = (struct shmid_ds *) IPC_NOID;
			goto found;
		}
	return -ENOSPC;

found:
	shp = (struct shmid_ds *) kmalloc (sizeof (*shp), GFP_KERNEL);
	if (!shp) {
		shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
		if (shm_lock)
			wake_up (&shm_lock);
		return -ENOMEM;
	}

	shp->shm_pages = (ulong *) kmalloc (numpages * sizeof(ulong), GFP_KERNEL);
	if (!shp->shm_pages) {
		shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
		if (shm_lock)
			wake_up (&shm_lock);
		kfree(shp);
		return -ENOMEM;
	}

	for (i = 0; i < numpages; shp->shm_pages[i++] = 0);
	shm_tot += numpages;
	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->shm_perm.cuid = shp->shm_perm.uid = current->euid;
	shp->shm_perm.cgid = shp->shm_perm.gid = current->egid;
	shp->shm_perm.seq = shm_seq;
	shp->shm_segsz = size;
	shp->shm_cpid = current->pid;
	shp->attaches = NULL;
	shp->shm_lpid = shp->shm_nattch = 0;
	shp->shm_atime = shp->shm_dtime = 0;
	shp->shm_ctime = CURRENT_TIME;
	shp->shm_npages = numpages;

	if (id > max_shmid)
		max_shmid = id;
	shm_segs[id] = shp;
	used_segs++;
	if (shm_lock)
		wake_up (&shm_lock);
	return id + (int) shm_seq * SHMMNI;	/* external id = slot + seq*SHMMNI */
}

int sys_shmget (key_t key, int size, int shmflg)
{
	struct shmid_ds *shp;
	int id = 0;

	if (size < 0 || size > SHMMAX)
		return -EINVAL;
	if (key == IPC_PRIVATE)
		return newseg(key, shmflg, size);
	if ((id = findkey (key)) == -1) {
		if (!(shmflg & IPC_CREAT))
			return -ENOENT;
		return newseg(key, shmflg, size);
	}
	if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL))
		return -EEXIST;
	shp = shm_segs[id];
	if (shp->shm_perm.mode & SHM_DEST)
		return -EIDRM;
	if (size > shp->shm_segsz)
		return -EINVAL;
	if (ipcperms (&shp->shm_perm, shmflg))
		return -EACCES;
	return shp->shm_perm.seq * SHMMNI + id;
}
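
/*
 * Usage sketch (user space; our illustration, not part of this file):
 * the returned id encodes slot and sequence number, so an id that
 * outlives its segment is later refused with -EIDRM.  The key 0x1234
 * is arbitrary.
 *
 *	int id = shmget(0x1234, 4096, IPC_CREAT | IPC_EXCL | 0600);
 *	if (id < 0)
 *		perror("shmget");
 *
 * Typical failures: EEXIST (key taken), EACCES (mode), ENOSPC (table full).
 */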

/*
 * Only called after testing nattch and SHM_DEST.
 * Here pages, the page array and the shmid_ds are freed.
 */
static void killseg (int id)
{
	struct shmid_ds *shp;
	int i, numpages;
	ulong page;

	shp = shm_segs[id];
	if (shp == IPC_NOID || shp == IPC_UNUSED) {
		printk ("shm nono: killseg called on unused seg id=%d\n", id);
		return;
	}
	shp->shm_perm.seq++;	/* so that stale ids can be recognized */
	numpages = shp->shm_npages;
	shm_seq++;
	shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
	used_segs--;
	if (id == max_shmid)
		while (max_shmid && (shm_segs[--max_shmid] == IPC_UNUSED));
	if (!shp->shm_pages) {
		printk ("shm nono: killseg shp->pages=NULL. id=%d\n", id);
		return;
	}
	for (i = 0; i < numpages; i++) {
		if (!(page = shp->shm_pages[i]))
			continue;
		if (page & 1) {		/* present bit set: page is in core */
			free_page (page & PAGE_MASK);
			shm_rss--;
		} else {		/* otherwise it is a swap entry */
			swap_free (page);
			shm_swp--;
		}
	}
	kfree(shp->shm_pages);
	shm_tot -= numpages;
	kfree(shp);
	return;
}

int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
{
	struct shmid_ds *shp, tbuf;
	struct ipc_perm *ipcp;
	int id, err;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;
	if (cmd == IPC_SET) {
		if (!buf)
			return -EFAULT;
		err = verify_area (VERIFY_READ, buf, sizeof (*buf));
		if (err)
			return err;
		memcpy_fromfs (&tbuf, buf, sizeof (*buf));
	}

	switch (cmd) {
	case IPC_INFO:
	{
		struct shminfo shminfo;
		if (!buf)
			return -EFAULT;
		shminfo.shmmni = SHMMNI;
		shminfo.shmmax = SHMMAX;
		shminfo.shmmin = SHMMIN;
		shminfo.shmall = SHMALL;
		shminfo.shmseg = SHMSEG;
		err = verify_area (VERIFY_WRITE, buf, sizeof (struct shminfo));
		if (err)
			return err;
		memcpy_tofs (buf, &shminfo, sizeof(struct shminfo));
		return max_shmid;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;
		if (!buf)
			return -EFAULT;
		err = verify_area (VERIFY_WRITE, buf, sizeof (shm_info));
		if (err)
			return err;
		shm_info.used_ids = used_segs;
		shm_info.shm_rss = shm_rss;
		shm_info.shm_tot = shm_tot;
		shm_info.shm_swp = shm_swp;
		shm_info.swap_attempts = swap_attempts;
		shm_info.swap_successes = swap_successes;
		memcpy_tofs (buf, &shm_info, sizeof(shm_info));
		return max_shmid;
	}
	case SHM_STAT:
		if (!buf)
			return -EFAULT;
		err = verify_area (VERIFY_WRITE, buf, sizeof (*shp));
		if (err)
			return err;
		if (shmid > max_shmid)
			return -EINVAL;
		shp = shm_segs[shmid];
		if (shp == IPC_UNUSED || shp == IPC_NOID)
			return -EINVAL;
		if (ipcperms (&shp->shm_perm, S_IRUGO))
			return -EACCES;
		id = shmid + shp->shm_perm.seq * SHMMNI;
		memcpy_tofs (buf, shp, sizeof(*shp));
		return id;
	}

	shp = shm_segs[id = shmid % SHMMNI];
	if (shp == IPC_UNUSED || shp == IPC_NOID)
		return -EINVAL;
	ipcp = &shp->shm_perm;
	if (ipcp->seq != shmid / SHMMNI)
		return -EIDRM;

	switch (cmd) {
	case SHM_UNLOCK:
		if (!suser())
			return -EPERM;
		if (!(ipcp->mode & SHM_LOCKED))
			return -EINVAL;
		ipcp->mode &= ~SHM_LOCKED;
		break;
	case SHM_LOCK:
		/*
		 * Allow the superuser to lock the segment in memory;
		 * pages are not faulted in here, the flag only keeps
		 * shm_swap() from evicting them.
		 */
		if (!suser())
			return -EPERM;
		if (ipcp->mode & SHM_LOCKED)
			return -EINVAL;
		ipcp->mode |= SHM_LOCKED;
		break;
	case IPC_STAT:
		if (ipcperms (ipcp, S_IRUGO))
			return -EACCES;
		if (!buf)
			return -EFAULT;
		err = verify_area (VERIFY_WRITE, buf, sizeof (*shp));
		if (err)
			return err;
		memcpy_tofs (buf, shp, sizeof(*shp));
		break;
	case IPC_SET:
		if (suser() || current->euid == shp->shm_perm.uid ||
		    current->euid == shp->shm_perm.cuid) {
			ipcp->uid = tbuf.shm_perm.uid;
			ipcp->gid = tbuf.shm_perm.gid;
			ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
				| (tbuf.shm_perm.mode & S_IRWXUGO);
			shp->shm_ctime = CURRENT_TIME;
			break;
		}
		return -EPERM;
	case IPC_RMID:
		if (suser() || current->euid == shp->shm_perm.uid ||
		    current->euid == shp->shm_perm.cuid) {
			shp->shm_perm.mode |= SHM_DEST;
			if (shp->shm_nattch <= 0)
				killseg (id);
			break;
		}
		return -EPERM;
	default:
		return -EINVAL;
	}
	return 0;
}
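
/*
 * Usage sketch (user space; our illustration, not part of this file):
 * IPC_STAT copies the shmid_ds out through the switch above; IPC_RMID
 * sets SHM_DEST, so killseg() runs as soon as the last attach is gone.
 *
 *	struct shmid_ds ds;
 *
 *	if (shmctl(id, IPC_STAT, &ds) == 0)
 *		printf("size=%d nattch=%d\n", ds.shm_segsz, ds.shm_nattch);
 *	shmctl(id, IPC_RMID, (struct shmid_ds *) 0);
 */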

/*
 * check that the range is unmapped and has page tables, then
 * install the shm "signature" entries.  with remap != 0, existing
 * mappings in the range are torn down first.
 */
static int shm_map (struct shm_desc *shmd, int remap)
{
	unsigned long invalid = 0;
	unsigned long *page_table;
	unsigned long tmp, shm_sgn;
	unsigned long page_dir = shmd->task->tss.cr3;

	/* check that the range is unmapped and has page_tables */
	for (tmp = shmd->start; tmp < shmd->end; tmp += PAGE_SIZE) {
		page_table = PAGE_DIR_OFFSET(page_dir,tmp);
		if (*page_table & PAGE_PRESENT) {
			page_table = (ulong *) (PAGE_MASK & *page_table);
			page_table += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
			if (*page_table) {
				if (!remap)
					return -EINVAL;
				if (*page_table & PAGE_PRESENT) {
					--current->mm->rss;
					free_page (*page_table & PAGE_MASK);
				}
				else
					swap_free (*page_table);
				invalid++;
			}
			continue;
		}
		{
			unsigned long new_pt;
			if (!(new_pt = get_free_page(GFP_KERNEL)))
				return -ENOMEM;
			*page_table = new_pt | PAGE_TABLE;
			/* the new page table is empty: skip ahead to its
			   last page, i.e. the end of this 4 MB region */
			tmp |= ((PAGE_SIZE << 10) - PAGE_SIZE);
		}
	}
	if (invalid)
		invalidate();

	/* map page range */
	shm_sgn = shmd->shm_sgn;
	for (tmp = shmd->start; tmp < shmd->end; tmp += PAGE_SIZE,
	     shm_sgn += (1 << SHM_IDX_SHIFT)) {
		page_table = PAGE_DIR_OFFSET(page_dir,tmp);
		page_table = (ulong *) (PAGE_MASK & *page_table);
		page_table += (tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
		*page_table = shm_sgn;
	}
	return 0;
}

static struct vm_operations_struct shm_vm_ops = {
	NULL,			/* no other vm operation is needed... */
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	shm_swap_in		/* ...only swapin is provided */
};

/*
 * insert a vm_area_struct for the shm range, replacing any
 * previous mapping there.
 */
static int add_vm_area(unsigned long addr, unsigned long len, int readonly)
{
	struct vm_area_struct * vma;

	vma = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!vma)
		return -ENOMEM;
	do_munmap(addr, len);
	vma->vm_task = current;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = VM_SHM | VM_MAYREAD | VM_MAYEXEC | VM_READ | VM_EXEC;
	if (readonly)
		vma->vm_page_prot = PAGE_READONLY;
	else {
		vma->vm_flags |= VM_MAYWRITE | VM_WRITE;
		vma->vm_page_prot = PAGE_SHARED;
	}
	vma->vm_share = NULL;
	vma->vm_inode = NULL;
	vma->vm_offset = 0;
	vma->vm_ops = &shm_vm_ops;
	insert_vm_struct(current, vma);
	merge_segments(current->mm->mmap);
	return 0;
}

/*
 * Fix shmaddr, allocate descriptor, map shm range.
 */
int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_ds *shp;
	struct shm_desc *shmd;
	int err;
	unsigned int id;
	unsigned long addr;

	if (shmid < 0)
		return -EINVAL;

	if (raddr) {
		err = verify_area(VERIFY_WRITE, raddr, sizeof(long));
		if (err)
			return err;
	}

	shp = shm_segs[id = shmid % SHMMNI];
	if (shp == IPC_UNUSED || shp == IPC_NOID)
		return -EINVAL;

	if (!(addr = (ulong) shmaddr)) {
		if (shmflg & SHM_REMAP)
			return -EINVAL;
		/* set addr below all current unspecified attaches */
		addr = SHM_RANGE_END;
		for (shmd = current->shm; shmd; shmd = shmd->task_next) {
			if (shmd->start < SHM_RANGE_START)
				continue;
			if (addr >= shmd->start)
				addr = shmd->start;
		}
		addr = (addr - shp->shm_segsz) & PAGE_MASK;
	} else if (addr & (SHMLBA-1)) {
		if (shmflg & SHM_RND)
			addr &= ~(SHMLBA-1);	/* round down */
		else
			return -EINVAL;
	}
	/* leave at least 16k of gap below the stack */
	if (addr > current->mm->start_stack - 16384 - PAGE_SIZE * shp->shm_npages)
		return -EINVAL;
	/* remapping over another shm attachment would corrupt its lists */
	if (shmflg & SHM_REMAP)
		for (shmd = current->shm; shmd; shmd = shmd->task_next) {
			if (addr >= shmd->start && addr < shmd->end)
				return -EINVAL;
			if (addr + shp->shm_segsz >= shmd->start &&
			    addr + shp->shm_segsz < shmd->end)
				return -EINVAL;
		}

	if (ipcperms(&shp->shm_perm, shmflg & SHM_RDONLY ? S_IRUGO : S_IRUGO|S_IWUGO))
		return -EACCES;
	if (shp->shm_perm.seq != shmid / SHMMNI)
		return -EIDRM;

	shmd = (struct shm_desc *) kmalloc (sizeof(*shmd), GFP_KERNEL);
	if (!shmd)
		return -ENOMEM;
	if ((shp != shm_segs[id]) || (shp->shm_perm.seq != shmid / SHMMNI)) {
		/* kmalloc may have slept and the segment changed under us */
		kfree(shmd);
		return -EIDRM;
	}
	/* pte signature: swap type, segment id, read-only flag; the page
	   index is or-ed in per page by shm_map() */
	shmd->shm_sgn = (SHM_SWP_TYPE << 1) | (id << SHM_ID_SHIFT) |
		(shmflg & SHM_RDONLY ? SHM_READ_ONLY : 0);
	shmd->start = addr;
	shmd->end = addr + shp->shm_npages * PAGE_SIZE;
	shmd->task = current;

	if ((err = add_vm_area(shmd->start, shmd->end - shmd->start, shmflg & SHM_RDONLY))) {
		kfree(shmd);
		return err;
	}

	shp->shm_nattch++;	/* prevent destruction */

	if ((err = shm_map (shmd, shmflg & SHM_REMAP))) {
		if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
			killseg(id);
		kfree(shmd);
		return err;
	}

	shmd->task_next = current->shm;
	current->shm = shmd;
	shmd->seg_next = shp->attaches;
	shp->attaches = shmd;
	shp->shm_lpid = current->pid;
	shp->shm_atime = CURRENT_TIME;
	if (!raddr)
		return addr;
	put_fs_long (addr, raddr);
	return 0;
}
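
/*
 * Usage sketch (user space; our illustration, not part of this file):
 * attach read-only at a kernel-chosen address.  shm_map() installed
 * only "signature" ptes, so each page is pulled in lazily through
 * shm_swap_in() on first access.
 *
 *	char *p = (char *) shmat(id, (char *) 0, SHM_RDONLY);
 *	if (p != (char *) -1) {
 *		printf("%c\n", p[0]);	(first touch takes the fault path)
 *		shmdt(p);
 *	}
 */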

/*
 * remove the attach descriptor *shmdp from the task's list and from
 * the segment's attach list, unmap the range and free the descriptor.
 * kills the segment if it was marked SHM_DEST and this was the last
 * attach.
 */
static void detach (struct shm_desc **shmdp)
{
	struct shm_desc *shmd = *shmdp;
	struct shmid_ds *shp;
	int id;

	id = (shmd->shm_sgn >> SHM_ID_SHIFT) & SHM_ID_MASK;
	shp = shm_segs[id];
	*shmdp = shmd->task_next;
	for (shmdp = &shp->attaches; *shmdp; shmdp = &(*shmdp)->seg_next)
		if (*shmdp == shmd) {
			*shmdp = shmd->seg_next;
			goto found;
		}
	printk("detach: shm segment (id=%d) attach list inconsistent\n", id);

found:
	do_munmap(shmd->start, shp->shm_segsz);
	kfree(shmd);
	shp->shm_lpid = current->pid;
	shp->shm_dtime = CURRENT_TIME;
	if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
		killseg (id);
	return;
}

/*
 * detach from the segment beginning at address shmaddr
 */
int sys_shmdt (char *shmaddr)
{
	struct shm_desc *shmd, **shmdp;

	for (shmdp = &current->shm; (shmd = *shmdp); shmdp = &shmd->task_next) {
		if (shmd->start == (ulong) shmaddr) {
			detach (shmdp);
			return 0;
		}
	}
	return -EINVAL;
}

/*
 * detach all attached segments.  called on process exit.
 */
void shm_exit (void)
{
	while (current->shm)
		detach(&current->shm);
	return;
}

/*
 * copy the shm descriptors from p1 to p2.  used by fork.
 */
int shm_fork (struct task_struct *p1, struct task_struct *p2)
{
	struct shm_desc *shmd, *new_desc = NULL, *tmp;
	struct shmid_ds *shp;
	int id;

	p2->semun = NULL;
	p2->shm = NULL;
	if (!p1->shm)
		return 0;
	for (shmd = p1->shm; shmd; shmd = shmd->task_next) {
		tmp = (struct shm_desc *) kmalloc(sizeof(*tmp), GFP_KERNEL);
		if (!tmp) {
			/* out of memory: undo what was copied so far */
			while (new_desc) {
				tmp = new_desc->task_next;
				kfree(new_desc);
				new_desc = tmp;
			}
			free_page_tables (p2);
			return -ENOMEM;
		}
		*tmp = *shmd;
		tmp->task = p2;
		tmp->task_next = new_desc;
		new_desc = tmp;
	}
	p2->shm = new_desc;
	for (shmd = new_desc; shmd; shmd = shmd->task_next) {
		id = (shmd->shm_sgn >> SHM_ID_SHIFT) & SHM_ID_MASK;
		shp = shm_segs[id];
		if (shp == IPC_UNUSED) {
			printk("shm_fork: unused id=%d PANIC\n", id);
			return -ENOMEM;
		}
		shmd->seg_next = shp->attaches;
		shp->attaches = shmd;
		shp->shm_nattch++;
		shp->shm_atime = CURRENT_TIME;
		shp->shm_lpid = current->pid;
	}
	return 0;
}

/*
 * demand-load a shm page: called via shm_vm_ops.swapin when a
 * "signature" pte faults.  allocates a fresh page or reads the page
 * back from swap, and returns the new pte value.
 */
static unsigned long shm_swap_in(struct vm_area_struct * vma, unsigned long code)
{
	unsigned long page;
	struct shmid_ds *shp;
	unsigned int id, idx;

	id = (code >> SHM_ID_SHIFT) & SHM_ID_MASK;
	if (id > max_shmid) {
		printk ("shm_no_page: id=%d too big. proc mem corrupted\n", id);
		return BAD_PAGE | PAGE_SHARED;
	}
	shp = shm_segs[id];
	if (shp == IPC_UNUSED || shp == IPC_NOID) {
		printk ("shm_no_page: id=%d invalid. Race.\n", id);
		return BAD_PAGE | PAGE_SHARED;
	}
	idx = (code >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
	if (idx >= shp->shm_npages) {
		printk ("shm_no_page: too large page index. id=%d\n", id);
		return BAD_PAGE | PAGE_SHARED;
	}

	if (!(shp->shm_pages[idx] & PAGE_PRESENT)) {
		if (!(page = get_free_page(GFP_KERNEL))) {
			oom(current);
			return BAD_PAGE | PAGE_SHARED;
		}
		if (shp->shm_pages[idx] & PAGE_PRESENT) {
			free_page (page);	/* another process beat us to it */
			goto done;
		}
		if (shp->shm_pages[idx]) {	/* the page is on swap */
			read_swap_page (shp->shm_pages[idx], (char *) page);
			if (shp->shm_pages[idx] & PAGE_PRESENT) {
				free_page (page);
				goto done;
			}
			swap_free (shp->shm_pages[idx]);
			shm_swp--;
		}
		shm_rss++;
		shp->shm_pages[idx] = page | (PAGE_SHARED | PAGE_DIRTY);
	} else
		--current->mm->maj_flt;	/* page was resident: no I/O after all */

done:
	current->mm->min_flt++;
	page = shp->shm_pages[idx];
	if (code & SHM_READ_ONLY)	/* attached read-only */
		page &= ~PAGE_RW;
	mem_map[MAP_NR(page)]++;	/* new reference to the shared page */
	return page;
}

static unsigned long swap_id = 0;	/* currently being swapped */
static unsigned long swap_idx = 0;	/* next to swap */

/*
 * Goes through counter = (shm_rss >> prio) present shm pages until it
 * finds one that is unlocked, unreferenced and mapped only through
 * the shm signature entries, then writes that page to swap.
 */
int shm_swap (int prio)
{
	unsigned long page;
	struct shmid_ds *shp;
	struct shm_desc *shmd;
	unsigned int swap_nr;
	unsigned long id, idx, invalid = 0;
	int counter;

	counter = shm_rss >> prio;
	if (!counter || !(swap_nr = get_swap_page()))
		return 0;

check_id:
	shp = shm_segs[swap_id];
	if (shp == IPC_UNUSED || shp == IPC_NOID || shp->shm_perm.mode & SHM_LOCKED) {
		swap_idx = 0;
		if (++swap_id > max_shmid)
			swap_id = 0;
		goto check_id;
	}
	id = swap_id;

check_table:
	idx = swap_idx++;
	if (idx >= shp->shm_npages) {
		swap_idx = 0;
		if (++swap_id > max_shmid)
			swap_id = 0;
		goto check_id;
	}

	page = shp->shm_pages[idx];
	if (!(page & PAGE_PRESENT))
		goto check_table;
	swap_attempts++;

	if (--counter < 0) {	/* failed: give the swap entry back */
		if (invalid)
			invalidate();
		swap_free (swap_nr);
		return 0;
	}
	/* replace each mapping of this page by its shm signature entry */
	for (shmd = shp->attaches; shmd; shmd = shmd->seg_next) {
		unsigned long tmp, *pte;
		if ((shmd->shm_sgn >> SHM_ID_SHIFT & SHM_ID_MASK) != id) {
			printk ("shm_swap: id=%ld does not match shmd\n", id);
			continue;
		}
		tmp = shmd->start + (idx << PAGE_SHIFT);
		if (tmp >= shmd->end) {
			printk ("shm_swap: too large idx=%ld id=%ld PANIC\n", idx, id);
			continue;
		}
		pte = PAGE_DIR_OFFSET(shmd->task->tss.cr3,tmp);
		if (!(*pte & 1)) {	/* page table itself is missing */
			printk("shm_swap: bad pgtbl! id=%ld start=%lx idx=%ld\n",
				id, shmd->start, idx);
			*pte = 0;
			continue;
		}
		pte = (ulong *) (PAGE_MASK & *pte);
		pte += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
		tmp = *pte;
		if (!(tmp & PAGE_PRESENT))
			continue;
		if (tmp & PAGE_ACCESSED) {	/* recently used: keep in core */
			*pte &= ~PAGE_ACCESSED;
			continue;
		}
		tmp = shmd->shm_sgn | idx << SHM_IDX_SHIFT;
		*pte = tmp;
		mem_map[MAP_NR(page)]--;
		shmd->task->mm->rss--;
		invalid++;
	}

	if (mem_map[MAP_NR(page)] != 1)	/* page still referenced elsewhere */
		goto check_table;
	page &= PAGE_MASK;
	shp->shm_pages[idx] = swap_nr;
	if (invalid)
		invalidate();
	write_swap_page (swap_nr, (char *) page);
	free_page (page);
	swap_successes++;
	shm_swp++;
	shm_rss--;
	return 1;
}