root/ipc/shm.c

DEFINITIONS

This source file includes the following definitions.
  1. shm_init
  2. findkey
  3. newseg
  4. sys_shmget
  5. killseg
  6. sys_shmctl
  7. shm_map
  8. sys_shmat
  9. detach
  10. sys_shmdt
  11. shm_exit
  12. shm_fork
  13. shm_no_page
  14. shm_swap

/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *         Many improvements/fixes by Bruno Haible.
 * assume user segments start at 0x00
 */

#include <linux/errno.h>
#include <asm/segment.h>
#include <linux/sched.h>
#include <linux/ipc.h>
#include <linux/shm.h>

extern int ipcperms (struct ipc_perm *ipcp, short semflg);
extern unsigned int get_swap_page(void);
static int findkey (key_t key);
static int newseg (key_t key, int shmflg, int size);
static int shm_map (struct shm_desc *shmd, int remap);
static void killseg (int id);

static int shm_tot = 0;   /* total number of shared memory pages */
static int shm_rss = 0;   /* number of shared memory pages that are in memory */
static int shm_swp = 0;   /* number of shared memory pages that are in swap */
static int shm_seq = 0;   /* is incremented, for recognizing stale ids */
static int max_shmid = 0; /* every used id is <= max_shmid */
static struct wait_queue *shm_lock = NULL;
static struct shmid_ds *shm_segs[SHMMNI];

/* some statistics */
static ulong swap_attempts = 0;
static ulong swap_successes = 0;
static ulong used_segs = 0;

void shm_init (void)
{
        int id;

        for (id = 0; id < SHMMNI; id++)
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
        shm_tot = shm_rss = shm_seq = max_shmid = used_segs = 0;
        shm_lock = NULL;
        return;
}

static int findkey (key_t key)
{
        int id;
        struct shmid_ds *shp;

        for (id = 0; id <= max_shmid; id++) {
                while ((shp = shm_segs[id]) == IPC_NOID)
                        sleep_on (&shm_lock);
                if (shp == IPC_UNUSED)
                        continue;
                if (key == shp->shm_perm.key)
                        return id;
        }
        return -1;
}

/*
 * allocate a new shmid_ds and page table. protected by shm_segs[id] = IPC_NOID.
 */
static int newseg (key_t key, int shmflg, int size)
{
        struct shmid_ds *shp;
        int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int id, i;

        if (size < SHMMIN)
                return -EINVAL;
        if (shm_tot + numpages >= SHMALL)
                return -ENOSPC;
        for (id = 0; id < SHMMNI; id++)
                if (shm_segs[id] == IPC_UNUSED) {
                        shm_segs[id] = (struct shmid_ds *) IPC_NOID;
                        goto found;
                }
        return -ENOSPC;

found:
        shp = (struct shmid_ds *) kmalloc (sizeof (*shp), GFP_KERNEL);
        if (!shp) {
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
                if (shm_lock)
                        wake_up (&shm_lock);
                return -ENOMEM;
        }

        shp->shm_pages = (ulong *) kmalloc (numpages * sizeof(ulong), GFP_KERNEL);
        if (!shp->shm_pages) {
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
                if (shm_lock)
                        wake_up (&shm_lock);
                kfree_s (shp, sizeof (*shp));
                return -ENOMEM;
        }

        for (i = 0; i < numpages; shp->shm_pages[i++] = 0);
        shm_tot += numpages;
        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & 0777);
        shp->shm_perm.cuid = shp->shm_perm.uid = current->euid;
        shp->shm_perm.cgid = shp->shm_perm.gid = current->egid;
        shp->shm_perm.seq = shm_seq;
        shp->shm_segsz = size;
        shp->shm_cpid = current->pid;
        shp->attaches = NULL;
        shp->shm_lpid = shp->shm_nattch = 0;
        shp->shm_atime = shp->shm_dtime = 0;
        shp->shm_ctime = CURRENT_TIME;
        shp->shm_npages = numpages;

        if (id > max_shmid)
                max_shmid = id;
        shm_segs[id] = shp;
        used_segs++;
        if (shm_lock)
                wake_up (&shm_lock);
        return id + shm_seq * SHMMNI;
}
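
/*
 * Illustrative sketch, not part of the original file: the value returned
 * above packs the slot index and the sequence counter into one user-visible
 * id.  These hypothetical helpers spell out the decoding that sys_shmget,
 * sys_shmctl and sys_shmat perform inline with % and /.
 */
static int shmid_to_slot (int shmid)
{
        return shmid % SHMMNI;          /* index into shm_segs[] */
}

static int shmid_to_seq (int shmid)
{
        return shmid / SHMMNI;          /* compared against shm_perm.seq */
}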

int sys_shmget (key_t key, int size, int shmflg)
{
        struct shmid_ds *shp;
        int id = 0;

        if (size < 0 || size > SHMMAX)
                return -EINVAL;
        if (key == IPC_PRIVATE)
                return newseg(key, shmflg, size);
        if ((id = findkey (key)) == -1) {
                if (!(shmflg & IPC_CREAT))
                        return -ENOENT;
                return newseg(key, shmflg, size);
        }
        if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL))
                return -EEXIST;
        shp = shm_segs[id];
        if (shp->shm_perm.mode & SHM_DEST)
                return -EIDRM;
        if (size > shp->shm_segsz)
                return -EINVAL;
        if (ipcperms (&shp->shm_perm, shmflg))
                return -EACCES;
        return shp->shm_perm.seq * SHMMNI + id;
}
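
/*
 * Usage sketch from user space (illustrative only; shmget is the usual
 * libc wrapper for this syscall, and example_get is a hypothetical name):
 * create a one-page segment for a key, creating it if it does not exist.
 */
#include <sys/ipc.h>
#include <sys/shm.h>

int example_get (key_t key)
{
        /* 0600 ends up in shm_perm.mode; IPC_CREAT routes to newseg above. */
        return shmget (key, 4096, IPC_CREAT | 0600);
}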

/*
 * Only called after testing nattch and SHM_DEST.
 * Here the pages, page table and shmid_ds are freed.
 */
static void killseg (int id)
{
        struct shmid_ds *shp;
        int i, numpages;
        ulong page;

        shp = shm_segs[id];
        if (shp == IPC_NOID || shp == IPC_UNUSED) {
                printk ("shm nono: killseg called on unused seg id=%d\n", id);
                return;
        }
        shp->shm_perm.seq++;     /* for shmat */
        numpages = shp->shm_npages;
        if ((int)((++shm_seq + 1) * SHMMNI) < 0)
                shm_seq = 0;
        shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
        used_segs--;
        if (id == max_shmid)
                while (max_shmid && (shm_segs[--max_shmid] == IPC_UNUSED));
        if (!shp->shm_pages) {
                printk ("shm nono: killseg shp->pages=NULL. id=%d\n", id);
                return;
        }
        for (i = 0; i < numpages; i++) {
                if (!(page = shp->shm_pages[i]))
                        continue;
                if (page & 1) {
                        free_page (page & ~0xfff);
                        shm_rss--;
                } else {
                        swap_free (page);
                        shm_swp--;
                }
        }
        kfree_s (shp->shm_pages, numpages * sizeof (ulong));
        shm_tot -= numpages;
        kfree_s (shp, sizeof (*shp));
        return;
}
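
/*
 * Illustrative helper (hypothetical, relying on the same assumption as the
 * loop above): entries of shm_pages[] reuse the i386 present bit, so bit 0
 * set means a physical page (free_page) and clear means a swap entry
 * (swap_free).
 */
static int example_page_is_present (unsigned long page)
{
        return page & 1;        /* bit 0: i386 present bit */
}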

int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
{
        struct shmid_ds *shp, tbuf;
        struct ipc_perm *ipcp;
        int id, err;

        if (cmd < 0 || shmid < 0)
                return -EINVAL;
        if (cmd == IPC_SET) {
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_READ, buf, sizeof (*buf));
                if (err)
                        return err;
                memcpy_fromfs (&tbuf, buf, sizeof (*buf));
        }

        switch (cmd) { /* replace with proc interface ? */
        case IPC_INFO:
        {
                struct shminfo shminfo;
                if (!buf)
                        return -EFAULT;
                shminfo.shmmni = SHMMNI;
                shminfo.shmmax = SHMMAX;
                shminfo.shmmin = SHMMIN;
                shminfo.shmall = SHMALL;
                shminfo.shmseg = SHMSEG;
                err = verify_area (VERIFY_WRITE, buf, sizeof (struct shminfo));
                if (err)
                        return err;
                memcpy_tofs (buf, &shminfo, sizeof(struct shminfo));
                return max_shmid;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (shm_info));
                if (err)
                        return err;
                shm_info.used_ids = used_segs;
                shm_info.shm_rss = shm_rss;
                shm_info.shm_tot = shm_tot;
                shm_info.shm_swp = shm_swp;
                shm_info.swap_attempts = swap_attempts;
                shm_info.swap_successes = swap_successes;
                memcpy_tofs (buf, &shm_info, sizeof(shm_info));
                return max_shmid;
        }
        case SHM_STAT:
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (*shp));
                if (err)
                        return err;
                if (shmid > max_shmid)
                        return -EINVAL;
                shp = shm_segs[shmid];
                if (shp == IPC_UNUSED || shp == IPC_NOID)
                        return -EINVAL;
                if (ipcperms (&shp->shm_perm, 0444))
                        return -EACCES;
                id = shmid + shp->shm_perm.seq * SHMMNI;
                memcpy_tofs (buf, shp, sizeof(*shp));
                return id;
        }

        shp = shm_segs[id = shmid % SHMMNI];
        if (shp == IPC_UNUSED || shp == IPC_NOID)
                return -EINVAL;
        ipcp = &shp->shm_perm;
        if (ipcp->seq != shmid / SHMMNI)
                return -EIDRM;

        switch (cmd) {
        case SHM_UNLOCK:
                if (!suser())
                        return -EPERM;
                if (!(ipcp->mode & SHM_LOCKED))
                        return -EINVAL;
                ipcp->mode &= ~SHM_LOCKED;
                break;
        case SHM_LOCK:
/* Allow superuser to lock segment in memory */
/* Should the pages be faulted in here or leave it to user? */
/* need to determine interaction with current->swappable */
                if (!suser())
                        return -EPERM;
                if (ipcp->mode & SHM_LOCKED)
                        return -EINVAL;
                ipcp->mode |= SHM_LOCKED;
                break;
        case IPC_STAT:
                if (ipcperms (ipcp, 0444))
                        return -EACCES;
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (*shp));
                if (err)
                        return err;
                memcpy_tofs (buf, shp, sizeof(*shp));
                break;
        case IPC_SET:
                if (suser() || current->euid == shp->shm_perm.uid ||
                    current->euid == shp->shm_perm.cuid) {
                        ipcp->uid = tbuf.shm_perm.uid;
                        ipcp->gid = tbuf.shm_perm.gid;
                        ipcp->mode = (ipcp->mode & ~0777)
                                | (tbuf.shm_perm.mode & 0777);
                        shp->shm_ctime = CURRENT_TIME;
                        break;
                }
                return -EPERM;
        case IPC_RMID:
                if (suser() || current->euid == shp->shm_perm.uid ||
                    current->euid == shp->shm_perm.cuid) {
                        shp->shm_perm.mode |= SHM_DEST;
                        if (shp->shm_nattch <= 0)
                                killseg (id);
                        break;
                }
                return -EPERM;
        default:
                return -EINVAL;
        }
        return 0;
}
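
/*
 * Usage sketch (illustrative; shmctl is the usual libc wrapper and
 * example_remove a hypothetical name): IPC_RMID only sets SHM_DEST above;
 * the memory is freed by killseg() once the last attach goes away.
 */
#include <sys/ipc.h>
#include <sys/shm.h>

int example_remove (int shmid)
{
        return shmctl (shmid, IPC_RMID, (struct shmid_ds *) 0);
}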

/*
 * check that the range is unmapped, ensure page tables exist,
 * and mark the page table entries with shm_sgn.
 * if remap != 0 the range is remapped.
 */
static int shm_map (struct shm_desc *shmd, int remap)
{
        unsigned long invalid = 0;
        unsigned long *page_table;
        unsigned long tmp, shm_sgn;
        unsigned long page_dir = shmd->task->tss.cr3;

        /* check that the range is unmapped and has page_tables */
        for (tmp = shmd->start; tmp < shmd->end; tmp += PAGE_SIZE) {
                page_table = (ulong *) (page_dir + ((tmp >> 20) & 0xffc));
                if (*page_table & PAGE_PRESENT) {
                        page_table = (ulong *) (0xfffff000 & *page_table);
                        page_table += ((tmp >> PAGE_SHIFT) & 0x3ff);
                        if (*page_table) {
                                if (!remap)
                                        return -EINVAL;
                                if (*page_table & PAGE_PRESENT) {
                                        --current->rss;
                                        free_page (*page_table & ~0xfff);
                                } else
                                        swap_free (*page_table);
                                invalid++;
                        }
                        continue;
                }
                {
                        unsigned long new_pt = get_free_page(GFP_KERNEL);
                        if (!new_pt)
                                return -ENOMEM;
                        *page_table = new_pt | PAGE_TABLE;
                        /* skip to the last page before the next page table;
                           0xffc00000 masks to a 4MB (page-dir) boundary */
                        tmp = ((tmp + (PAGE_SIZE << 10) - 1) & 0xffc00000) - PAGE_SIZE;
                }
        }
        if (invalid)
                invalidate();

        /* map page range */
        shm_sgn = shmd->shm_sgn;
        for (tmp = shmd->start; tmp < shmd->end; tmp += PAGE_SIZE,
             shm_sgn += (1 << SHM_IDX_SHIFT)) {
                page_table = (ulong *) (page_dir + ((tmp >> 20) & 0xffc));
                page_table = (ulong *) (0xfffff000 & *page_table);
                page_table += (tmp >> PAGE_SHIFT) & 0x3ff;
                *page_table = shm_sgn;
        }
        return 0;
}
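
/*
 * The address arithmetic above, spelled out as a sketch (this helper is
 * illustrative, not used by the file): with i386 two-level paging a linear
 * address splits into dir(10) | table(10) | offset(12) bits, and the byte
 * offset of the page-directory entry is dir * 4.
 */
static unsigned long example_pgd_offset (unsigned long addr)
{
        /* (addr >> 20) & 0xffc  ==  ((addr >> 22) & 0x3ff) << 2 */
        return (addr >> 20) & 0xffc;
}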

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 * raddr is needed to return addresses above 2Gig.
 * Specific attaches are allowed over the executable....
 */
int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
{
        struct shmid_ds *shp;
        struct shm_desc *shmd;
        int err;
        unsigned int id;
        unsigned long addr;

        if (shmid < 0)
                return -EINVAL;

        shp = shm_segs[id = shmid % SHMMNI];
        if (shp == IPC_UNUSED || shp == IPC_NOID)
                return -EINVAL;

#define SHM_RANGE_END 0x60000000
#define SHM_RANGE_START 0x40000000

        if (!(addr = (ulong) shmaddr)) {
                if (shmflg & SHM_REMAP)
                        return -EINVAL;
                /* set addr below all current unspecified attaches */
                addr = SHM_RANGE_END;
                for (shmd = current->shm; shmd; shmd = shmd->task_next) {
                        if (shmd->start < SHM_RANGE_START)
                                continue;
                        if (addr >= shmd->start)
                                addr = shmd->start;
                }
                addr = (addr - shp->shm_segsz) & ~0xfff;
        } else if (addr & (SHMLBA-1)) {
                if (shmflg & SHM_RND)
                        addr &= ~(SHMLBA-1);       /* round down */
                else
                        return -EINVAL;
        }
        if ((addr > current->start_stack - 16384 - PAGE_SIZE * shp->shm_npages))
                return -EINVAL;
        if (shmflg & SHM_REMAP)
                for (shmd = current->shm; shmd; shmd = shmd->task_next) {
                        if (addr >= shmd->start && addr < shmd->end)
                                return -EINVAL;
                        if (addr + shp->shm_segsz >= shmd->start &&
                            addr + shp->shm_segsz < shmd->end)
                                return -EINVAL;
                }

        if (ipcperms(&shp->shm_perm, shmflg & SHM_RDONLY ? 0444 : 0666))
                return -EACCES;
        if (shp->shm_perm.seq != shmid / SHMMNI)
                return -EIDRM;

        shmd = (struct shm_desc *) kmalloc (sizeof(*shmd), GFP_KERNEL);
        if (!shmd)
                return -ENOMEM;
        if ((shp != shm_segs[id]) || (shp->shm_perm.seq != shmid / SHMMNI)) {
                kfree_s (shmd, sizeof (*shmd));
                return -EIDRM;
        }
        shmd->shm_sgn = (SHM_SWP_TYPE << 1) | (id << SHM_ID_SHIFT) |
                (shmflg & SHM_RDONLY ? SHM_READ_ONLY : 0);
        shmd->start = addr;
        shmd->end = addr + shp->shm_npages * PAGE_SIZE;
        shmd->task = current;

        shp->shm_nattch++;            /* prevent destruction */
        if (addr < current->end_data) {
                iput (current->executable);
                current->executable = NULL;
/*              current->end_data = current->end_code = 0; */
        }

        if ((err = shm_map (shmd, shmflg & SHM_REMAP))) {
                if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
                        killseg(id);
                kfree_s (shmd, sizeof (*shmd));
                return err;
        }

        shmd->task_next = current->shm;
        current->shm = shmd;
        shmd->seg_next = shp->attaches;
        shp->attaches = shmd;
        shp->shm_lpid = current->pid;
        shp->shm_atime = CURRENT_TIME;
        put_fs_long (addr, raddr);
        return 0;
}
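
/*
 * Usage sketch from user space (illustrative; shmat/shmdt are the usual
 * libc wrappers, example_attach a hypothetical name): attach read-only at
 * a kernel-chosen address and detach again.  With shmaddr == 0 the code
 * above picks a gap just below SHM_RANGE_END.
 */
#include <sys/shm.h>

int example_attach (int shmid)
{
        char *p = (char *) shmat (shmid, (char *) 0, SHM_RDONLY);

        if (p == (char *) -1)
                return -1;              /* errno holds the cause */
        /* ... read the segment through p ... */
        return shmdt (p);
}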

/*
 * remove the first attach descriptor from the list *shmdp.
 * free memory for segment if it is marked destroyed.
 * The descriptor is detached before the sleep in unmap_page_range.
 */
static void detach (struct shm_desc **shmdp)
{
        struct shm_desc *shmd = *shmdp;
        struct shmid_ds *shp;
        int id;

        id = (shmd->shm_sgn >> SHM_ID_SHIFT) & SHM_ID_MASK;
        shp = shm_segs[id];
        *shmdp = shmd->task_next;
        for (shmdp = &shp->attaches; *shmdp; shmdp = &(*shmdp)->seg_next)
                if (*shmdp == shmd) {
                        *shmdp = shmd->seg_next;
                        goto found;
                }
        printk("detach: shm segment (id=%d) attach list inconsistent\n", id);

 found:
        unmap_page_range (shmd->start, shp->shm_segsz); /* sleeps */
        kfree_s (shmd, sizeof (*shmd));
        shp->shm_lpid = current->pid;
        shp->shm_dtime = CURRENT_TIME;
        if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
                killseg (id); /* sleeps */
        return;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in detach.
 */
int sys_shmdt (char *shmaddr)
{
        struct shm_desc *shmd, **shmdp;

        for (shmdp = &current->shm; (shmd = *shmdp); shmdp = &shmd->task_next) {
                if (shmd->start == (ulong) shmaddr) {
                        detach (shmdp);
                        return 0;
                }
        }
        return -EINVAL;
}

/*
 * detach all attached segments.
 */
void shm_exit (void)
{
        while (current->shm)
                detach(&current->shm);
        return;
}

/*
 * copy the parent's shm descriptors and update nattch.
 * The parent is stuck in fork, so an attach on each segment is assured.
 * copy_page_tables does the mapping.
 */
int shm_fork (struct task_struct *p1, struct task_struct *p2)
{
        struct shm_desc *shmd, *new_desc = NULL, *tmp;
        struct shmid_ds *shp;
        int id;

        if (!p1->shm)
                return 0;
        for (shmd = p1->shm; shmd; shmd = shmd->task_next) {
                tmp = (struct shm_desc *) kmalloc(sizeof(*tmp), GFP_KERNEL);
                if (!tmp) {
                        while (new_desc) {
                                tmp = new_desc->task_next;
                                kfree_s (new_desc, sizeof (*new_desc));
                                new_desc = tmp;
                        }
                        free_page_tables (p2);
                        return -ENOMEM;
                }
                *tmp = *shmd;
                tmp->task = p2;
                tmp->task_next = new_desc;
                new_desc = tmp;
        }
        p2->shm = new_desc;
        for (shmd = new_desc; shmd; shmd = shmd->task_next) {
                id = (shmd->shm_sgn >> SHM_ID_SHIFT) & 0xfff;
                shp = shm_segs[id];
                if (shp == IPC_UNUSED) {
                        printk("shm_fork: unused id=%d PANIC\n", id);
                        return -ENOMEM;
                }
                shmd->seg_next = shp->attaches;
                shp->attaches = shmd;
                shp->shm_nattch++;
                shp->shm_atime = CURRENT_TIME;
                shp->shm_lpid = current->pid;
        }
        return 0;
}

/*
 * page not present ... go through shm_pages ... called from swap_in()
 */
void shm_no_page (unsigned long *ptent)
{
        unsigned long page;
        unsigned long code = *ptent;
        struct shmid_ds *shp;
        unsigned int id, idx;

        id = (code >> SHM_ID_SHIFT) & SHM_ID_MASK;
        if (id > max_shmid) {
                printk ("shm_no_page: id=%d too big. proc mem corrupted\n", id);
                return;
        }
        shp = shm_segs[id];
        if (shp == IPC_UNUSED || shp == IPC_NOID) {
                printk ("shm_no_page: id=%d invalid. Race.\n", id);
                return;
        }
        idx = (code >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
        if (idx >= shp->shm_npages) {
                printk ("shm_no_page: too large page index. id=%d\n", id);
                return;
        }

        if (!(shp->shm_pages[idx] & PAGE_PRESENT)) {
                page = get_free_page(GFP_KERNEL);
                if (!page) {
                        oom(current);
                        *ptent = BAD_PAGE | PAGE_ACCESSED | 7;
                        return;
                }
                if (shp->shm_pages[idx] & PAGE_PRESENT) {
                        free_page (page);
                        goto done;
                }
                if (shp->shm_pages[idx]) {
                        read_swap_page (shp->shm_pages[idx], (char *) page);
                        if (shp->shm_pages[idx] & PAGE_PRESENT) {
                                free_page (page);
                                goto done;
                        }
                        swap_free (shp->shm_pages[idx]);
                        shm_swp--;
                }
                shm_rss++;
                shp->shm_pages[idx] = page | (PAGE_SHARED | PAGE_DIRTY);
        } else
                --current->maj_flt;  /* was incremented in do_no_page */

done:
        current->min_flt++;
        page = shp->shm_pages[idx];
        if (code & SHM_READ_ONLY)           /* write-protect */
                page &= ~2;
        mem_map[MAP_NR(page)]++;
        *ptent = page;
        return;
}
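
/*
 * Illustrative decode of the in-pte cookie handled above (a sketch using
 * the SHM_* constants from linux/shm.h; example_decode_sgn is hypothetical):
 * a non-present shm pte carries the segment id and page index, which is how
 * this fault handler finds the right slot in shm_pages[].
 */
static void example_decode_sgn (unsigned long code,
                                unsigned int *id, unsigned int *idx)
{
        *id  = (code >> SHM_ID_SHIFT)  & SHM_ID_MASK;
        *idx = (code >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
}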

/*
 * Goes through counter = (shm_rss >> prio) present shm pages.
 */
static unsigned long swap_id = 0; /* currently being swapped */
static unsigned long swap_idx = 0; /* next to swap */

int shm_swap (int prio)
{
        unsigned long page;
        struct shmid_ds *shp;
        struct shm_desc *shmd;
        unsigned int swap_nr;
        unsigned long id, idx, invalid = 0;
        int counter;

        counter = shm_rss >> prio;
        if (!counter || !(swap_nr = get_swap_page()))
                return 0;

 check_id:
        shp = shm_segs[swap_id];
        if (shp == IPC_UNUSED || shp == IPC_NOID || shp->shm_perm.mode & SHM_LOCKED) {
                swap_idx = 0;
                if (++swap_id > max_shmid)
                        swap_id = 0;
                goto check_id;
        }
        id = swap_id;

 check_table:
        idx = swap_idx++;
        if (idx >= shp->shm_npages) {
                swap_idx = 0;
                if (++swap_id > max_shmid)
                        swap_id = 0;
                goto check_id;
        }

        page = shp->shm_pages[idx];
        if (!(page & PAGE_PRESENT))
                goto check_table;
        swap_attempts++;

        if (--counter < 0) { /* failed */
                if (invalid)
                        invalidate();
                swap_free (swap_nr);
                return 0;
        }
        for (shmd = shp->attaches; shmd; shmd = shmd->seg_next) {
                unsigned long tmp, *pte;
                if ((shmd->shm_sgn >> SHM_ID_SHIFT & SHM_ID_MASK) != id) {
                        printk ("shm_swap: id=%d does not match shmd\n", id);
                        continue;
                }
                tmp = shmd->start + (idx << PAGE_SHIFT);
                if (tmp >= shmd->end) {
                        printk ("shm_swap: too large idx=%d id=%d PANIC\n", idx, id);
                        continue;
                }
                pte = (ulong *) (shmd->task->tss.cr3 + ((tmp >> 20) & 0xffc));
                if (!(*pte & 1)) {
                        printk("shm_swap: bad pgtbl! id=%d start=%x idx=%d\n",
                                        id, shmd->start, idx);
                        *pte = 0;
                        continue;
                }
                pte = (ulong *) (0xfffff000 & *pte);
                pte += ((tmp >> PAGE_SHIFT) & 0x3ff);
                tmp = *pte;
                if (!(tmp & PAGE_PRESENT))
                        continue;
                if (tmp & PAGE_ACCESSED) {
                        *pte &= ~PAGE_ACCESSED;
                        continue;
                }
                tmp = shmd->shm_sgn | idx << SHM_IDX_SHIFT;
                *pte = tmp;
                mem_map[MAP_NR(page)]--;
                shmd->task->rss--;
                invalid++;
        }

        if (mem_map[MAP_NR(page)] != 1)
                goto check_table;
        page &= ~0xfff;
        shp->shm_pages[idx] = swap_nr;
        if (invalid)
                invalidate();
        write_swap_page (swap_nr, (char *) page);
        free_page (page);
        swap_successes++;
        shm_swp++;
        shm_rss--;
        return 1;
}
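
/*
 * Illustrative note (hypothetical helper, not from this file): the scan
 * budget above is shm_rss >> prio, so each higher priority level halves
 * the number of resident shm pages one pass will examine.
 */
static int example_swap_budget (int rss, int prio)
{
        return rss >> prio;     /* e.g. rss = 64, prio = 2  ->  16 pages */
}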
