linux/ipc/shm.c


DEFINITIONS

This source file includes the following definitions.
  1. shm_init
  2. findkey
  3. newseg
  4. sys_shmget
  5. killseg
  6. sys_shmctl
  7. shm_map
  8. sys_shmat
  9. detach
  10. sys_shmdt
  11. shm_exit
  12. shm_fork
  13. shm_no_page
  14. shm_swap

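These are the handlers behind the System V shared memory system calls. As orientation, here is a minimal user-space sketch of the lifecycle they implement; it uses the standard libc wrappers, and the key 0x1234 and the 4096-byte size are arbitrary examples, not values from this file.

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main (void)
{
        /* sys_shmget: create a one-page segment */
        int id = shmget (0x1234, 4096, IPC_CREAT | 0600);
        if (id < 0) { perror ("shmget"); return 1; }

        /* sys_shmat: attach it; NULL lets the kernel pick the address */
        char *p = shmat (id, NULL, 0);
        if (p == (char *) -1) { perror ("shmat"); return 1; }

        p[0] = 'x';     /* first touch faults the page in via shm_no_page */

        shmdt (p);                      /* sys_shmdt -> detach() */
        shmctl (id, IPC_RMID, NULL);    /* marks SHM_DEST; killseg() once nattch is 0 */
        return 0;
}
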
/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *         Many improvements/fixes by Bruno Haible.
 * assume user segments start at 0x0
 */

#include <linux/errno.h>
#include <asm/segment.h>
#include <linux/sched.h>
#include <linux/ipc.h>
#include <linux/shm.h>
#include <linux/stat.h>

extern int ipcperms (struct ipc_perm *ipcp, short semflg);
extern unsigned int get_swap_page(void);
static int findkey (key_t key);
static int newseg (key_t key, int shmflg, int size);
static int shm_map (struct shm_desc *shmd, int remap);
static void killseg (int id);

static int shm_tot = 0;  /* total number of shared memory pages */
static int shm_rss = 0; /* number of shared memory pages that are in memory */
static int shm_swp = 0; /* number of shared memory pages that are in swap */
static int shm_seq = 0; /* is incremented, for recognizing stale ids */
static int max_shmid = 0; /* every used id is <= max_shmid */
static struct wait_queue *shm_lock = NULL;
static struct shmid_ds *shm_segs[SHMMNI];

/* some statistics */
static ulong swap_attempts = 0;
static ulong swap_successes = 0;
static ulong used_segs = 0;

void shm_init (void)
{
        int id;

        for (id = 0; id < SHMMNI; id++)
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
        shm_tot = shm_rss = shm_seq = max_shmid = used_segs = 0;
        shm_lock = NULL;
        return;
}

static int findkey (key_t key)
{
        int id;
        struct shmid_ds *shp;

        for (id=0; id <= max_shmid; id++) {
                while ((shp = shm_segs[id]) == IPC_NOID)
                        sleep_on (&shm_lock);
                if (shp == IPC_UNUSED)
                        continue;
                if (key == shp->shm_perm.key)
                        return id;
        }
        return -1;
}

/*
 * allocate new shmid_ds and pgtable. protected by shm_segs[id] = NOID.
 */
static int newseg (key_t key, int shmflg, int size)
{
        struct shmid_ds *shp;
        int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
        int id, i;

        if (size < SHMMIN)
                return -EINVAL;
        if (shm_tot + numpages >= SHMALL)
                return -ENOSPC;
        for (id=0; id < SHMMNI; id++)
                if (shm_segs[id] == IPC_UNUSED) {
                        shm_segs[id] = (struct shmid_ds *) IPC_NOID;
                        goto found;
                }
        return -ENOSPC;

found:
        shp = (struct shmid_ds *) kmalloc (sizeof (*shp), GFP_KERNEL);
        if (!shp) {
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
                if (shm_lock)
                        wake_up (&shm_lock);
                return -ENOMEM;
        }

        shp->shm_pages = (ulong *) kmalloc (numpages*sizeof(ulong),GFP_KERNEL);
        if (!shp->shm_pages) {
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
                if (shm_lock)
                        wake_up (&shm_lock);
                kfree_s (shp, sizeof (*shp));
                return -ENOMEM;
        }

        for (i=0; i< numpages; shp->shm_pages[i++] = 0);
        shm_tot += numpages;
        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->shm_perm.cuid = shp->shm_perm.uid = current->euid;
        shp->shm_perm.cgid = shp->shm_perm.gid = current->egid;
        shp->shm_perm.seq = shm_seq;
        shp->shm_segsz = size;
        shp->shm_cpid = current->pid;
        shp->attaches = NULL;
        shp->shm_lpid = shp->shm_nattch = 0;
        shp->shm_atime = shp->shm_dtime = 0;
        shp->shm_ctime = CURRENT_TIME;
        shp->shm_npages = numpages;

        if (id > max_shmid)
                max_shmid = id;
        shm_segs[id] = shp;
        used_segs++;
        if (shm_lock)
                wake_up (&shm_lock);
        return id + shm_seq*SHMMNI;
}
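
The value returned to user space is a composite id: the slot in shm_segs[] plus shm_seq*SHMMNI. The decode, exactly as sys_shmctl() and sys_shmat() perform it below:

        slot = shmid % SHMMNI;  /* index into shm_segs[] */
        seq  = shmid / SHMMNI;  /* must equal shp->shm_perm.seq, else -EIDRM */

Since killseg() increments the sequence number, an id that outlives its segment fails the seq check rather than aliasing whatever segment later reuses the slot.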

int sys_shmget (key_t key, int size, int shmflg)
{
        struct shmid_ds *shp;
        int id = 0;

        if (size < 0 || size > SHMMAX)
                return -EINVAL;
        if (key == IPC_PRIVATE)
                return newseg(key, shmflg, size);
        if ((id = findkey (key)) == -1) {
                if (!(shmflg & IPC_CREAT))
                        return -ENOENT;
                return newseg(key, shmflg, size);
        }
        if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL))
                return -EEXIST;
        shp = shm_segs[id];
        if (shp->shm_perm.mode & SHM_DEST)
                return -EIDRM;
        if (size > shp->shm_segsz)
                return -EINVAL;
        if (ipcperms (&shp->shm_perm, shmflg))
                return -EACCES;
        return shp->shm_perm.seq*SHMMNI + id;
}
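
In user space this path is reached through the shmget(2) wrapper. A minimal sketch of the create/lookup semantics above; the key, sizes and mode are arbitrary examples:

#include <errno.h>
#include <sys/ipc.h>
#include <sys/shm.h>

/* returns the id, exercising the EEXIST and EINVAL paths on the way */
int shmget_demo (void)
{
        int id = shmget (0x1234, 4096, IPC_CREAT | 0600);       /* create or look up */

        /* same key with IPC_CREAT|IPC_EXCL now fails with EEXIST */
        if (shmget (0x1234, 4096, IPC_CREAT | IPC_EXCL | 0600) < 0 && errno != EEXIST)
                return -1;

        /* requesting more than the segment's shm_segsz fails with EINVAL */
        if (shmget (0x1234, 8192, 0600) < 0 && errno != EINVAL)
                return -1;

        return id;
}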

/*
 * Only called after testing nattch and SHM_DEST.
 * Here pages, pgtable and shmid_ds are freed.
 */
static void killseg (int id)
{
        struct shmid_ds *shp;
        int i, numpages;
        ulong page;

        shp = shm_segs[id];
        if (shp == IPC_NOID || shp == IPC_UNUSED) {
                printk ("shm nono: killseg called on unused seg id=%d\n", id);
                return;
        }
        shp->shm_perm.seq++;     /* for shmat */
        numpages = shp->shm_npages;
        if ((int)((++shm_seq + 1) * SHMMNI) < 0)
                shm_seq = 0;
        shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
        used_segs--;
        if (id == max_shmid)
                while (max_shmid && (shm_segs[--max_shmid] == IPC_UNUSED));
        if (!shp->shm_pages) {
                printk ("shm nono: killseg shp->pages=NULL. id=%d\n", id);
                return;
        }
        for (i=0; i< numpages; i++) {
                if (!(page = shp->shm_pages[i]))
                        continue;
                if (page & 1) {
                        free_page (page & PAGE_MASK);
                        shm_rss--;
                } else {
                        swap_free (page);
                        shm_swp--;
                }
        }
        kfree_s (shp->shm_pages, numpages * sizeof (ulong));
        shm_tot -= numpages;
        kfree_s (shp, sizeof (*shp));
        return;
}

int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
{
        struct shmid_ds *shp, tbuf;
        struct ipc_perm *ipcp;
        int id, err;

        if (cmd < 0 || shmid < 0)
                return -EINVAL;
        if (cmd == IPC_SET) {
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_READ, buf, sizeof (*buf));
                if (err)
                        return err;
                memcpy_fromfs (&tbuf, buf, sizeof (*buf));
        }

        switch (cmd) { /* replace with proc interface ? */
        case IPC_INFO:
        {
                struct shminfo shminfo;
                if (!buf)
                        return -EFAULT;
                shminfo.shmmni = SHMMNI;
                shminfo.shmmax = SHMMAX;
                shminfo.shmmin = SHMMIN;
                shminfo.shmall = SHMALL;
                shminfo.shmseg = SHMSEG;
                err = verify_area (VERIFY_WRITE, buf, sizeof (struct shminfo));
                if (err)
                        return err;
                memcpy_tofs (buf, &shminfo, sizeof(struct shminfo));
                return max_shmid;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (shm_info));
                if (err)
                        return err;
                shm_info.used_ids = used_segs;
                shm_info.shm_rss = shm_rss;
                shm_info.shm_tot = shm_tot;
                shm_info.shm_swp = shm_swp;
                shm_info.swap_attempts = swap_attempts;
                shm_info.swap_successes = swap_successes;
                memcpy_tofs (buf, &shm_info, sizeof(shm_info));
                return max_shmid;
        }
        case SHM_STAT:
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (*shp));
                if (err)
                        return err;
                if (shmid > max_shmid)
                        return -EINVAL;
                shp = shm_segs[shmid];
                if (shp == IPC_UNUSED || shp == IPC_NOID)
                        return -EINVAL;
                if (ipcperms (&shp->shm_perm, S_IRUGO))
                        return -EACCES;
                id = shmid + shp->shm_perm.seq * SHMMNI;
                memcpy_tofs (buf, shp, sizeof(*shp));
                return id;
        }

        shp = shm_segs[id = shmid % SHMMNI];
        if (shp == IPC_UNUSED || shp == IPC_NOID)
                return -EINVAL;
        ipcp = &shp->shm_perm;
        if (ipcp->seq != shmid / SHMMNI)
                return -EIDRM;

        switch (cmd) {
        case SHM_UNLOCK:
                if (!suser())
                        return -EPERM;
                if (!(ipcp->mode & SHM_LOCKED))
                        return -EINVAL;
                ipcp->mode &= ~SHM_LOCKED;
                break;
        case SHM_LOCK:
/* Allow superuser to lock segment in memory */
/* Should the pages be faulted in here or leave it to user? */
/* need to determine interaction with current->swappable */
                if (!suser())
                        return -EPERM;
                if (ipcp->mode & SHM_LOCKED)
                        return -EINVAL;
                ipcp->mode |= SHM_LOCKED;
                break;
        case IPC_STAT:
                if (ipcperms (ipcp, S_IRUGO))
                        return -EACCES;
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (*shp));
                if (err)
                        return err;
                memcpy_tofs (buf, shp, sizeof(*shp));
                break;
        case IPC_SET:
                if (suser() || current->euid == shp->shm_perm.uid ||
                    current->euid == shp->shm_perm.cuid) {
                        ipcp->uid = tbuf.shm_perm.uid;
                        ipcp->gid = tbuf.shm_perm.gid;
                        ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
                                | (tbuf.shm_perm.mode & S_IRWXUGO);
                        shp->shm_ctime = CURRENT_TIME;
                        break;
                }
                return -EPERM;
        case IPC_RMID:
                if (suser() || current->euid == shp->shm_perm.uid ||
                    current->euid == shp->shm_perm.cuid) {
                        shp->shm_perm.mode |= SHM_DEST;
                        if (shp->shm_nattch <= 0)
                                killseg (id);
                        break;
                }
                return -EPERM;
        default:
                return -EINVAL;
        }
        return 0;
}
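
From user space the IPC_STAT/IPC_SET pair above is the usual read-modify-write of the permission bits; a minimal sketch, with mode 0644 as an arbitrary example:

#include <sys/ipc.h>
#include <sys/shm.h>

/* loosen an existing segment's mode; only owner/creator/root may IPC_SET */
int shm_chmod_demo (int id)
{
        struct shmid_ds ds;

        if (shmctl (id, IPC_STAT, &ds) < 0)     /* kernel copies *shp out */
                return -1;
        ds.shm_perm.mode = 0644;                /* only the S_IRWXUGO bits are kept */
        return shmctl (id, IPC_SET, &ds);       /* -EPERM unless uid/cuid match or root */
}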

/*
 * Check that the range is unmapped and ensure page tables exist;
 * mark the page table entries with shm_sgn.
 * If remap != 0, existing pages in the range are replaced.
 */
static int shm_map (struct shm_desc *shmd, int remap)
{
        unsigned long invalid = 0;
        unsigned long *page_table;
        unsigned long tmp, shm_sgn;
        unsigned long page_dir = shmd->task->tss.cr3;

        /* check that the range is unmapped and has page_tables */
        for (tmp = shmd->start; tmp < shmd->end; tmp += PAGE_SIZE) {
                page_table = PAGE_DIR_OFFSET(page_dir,tmp);
                if (*page_table & PAGE_PRESENT) {
                        page_table = (ulong *) (PAGE_MASK & *page_table);
                        page_table += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
                        if (*page_table) {
                                if (!remap)
                                        return -EINVAL;
                                if (*page_table & PAGE_PRESENT) {
                                        --current->rss;
                                        free_page (*page_table & PAGE_MASK);
                                }
                                else
                                        swap_free (*page_table);
                                invalid++;
                        }
                        continue;
                }
                {
                        unsigned long new_pt;
                        if (!(new_pt = get_free_page(GFP_KERNEL)))      /* clearing needed?  SRB. */
                                return -ENOMEM;
                        *page_table = new_pt | PAGE_TABLE;
                        tmp |= ((PAGE_SIZE << 10) - PAGE_SIZE);
                }
        }
        if (invalid)
                invalidate();

        /* map page range */
        shm_sgn = shmd->shm_sgn;
        for (tmp = shmd->start; tmp < shmd->end; tmp += PAGE_SIZE,
             shm_sgn += (1 << SHM_IDX_SHIFT)) {
                page_table = PAGE_DIR_OFFSET(page_dir,tmp);
                page_table = (ulong *) (PAGE_MASK & *page_table);
                page_table += (tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
                *page_table = shm_sgn;
        }
        return 0;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 * raddr is needed to return addresses above 2Gig.
 * Specific attaches are allowed over the executable....
 */
int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
{
        struct shmid_ds *shp;
        struct shm_desc *shmd;
        int err;
        unsigned int id;
        unsigned long addr;

        if (shmid < 0)
                return -EINVAL;

        shp = shm_segs[id = shmid % SHMMNI];
        if (shp == IPC_UNUSED || shp == IPC_NOID)
                return -EINVAL;

        if (!(addr = (ulong) shmaddr)) {
                if (shmflg & SHM_REMAP)
                        return -EINVAL;
                /* set addr below all current unspecified attaches */
                addr = SHM_RANGE_END;
                for (shmd = current->shm; shmd; shmd = shmd->task_next) {
                        if (shmd->start < SHM_RANGE_START)
                                continue;
                        if (addr >= shmd->start)
                                addr = shmd->start;
                }
                addr = (addr - shp->shm_segsz) & PAGE_MASK;
        } else if (addr & (SHMLBA-1)) {
                if (shmflg & SHM_RND)
                        addr &= ~(SHMLBA-1);       /* round down */
                else
                        return -EINVAL;
        }
        if ((addr > current->start_stack - 16384 - PAGE_SIZE*shp->shm_npages))
                return -EINVAL;
        if (shmflg & SHM_REMAP)
                for (shmd = current->shm; shmd; shmd = shmd->task_next) {
                        if (addr >= shmd->start && addr < shmd->end)
                                return -EINVAL;
                        if (addr + shp->shm_segsz >= shmd->start &&
                            addr + shp->shm_segsz < shmd->end)
                                return -EINVAL;
                }

        if (ipcperms(&shp->shm_perm, shmflg & SHM_RDONLY ? S_IRUGO : S_IRUGO|S_IWUGO))
                return -EACCES;
        if (shp->shm_perm.seq != shmid / SHMMNI)
                return -EIDRM;

        shmd = (struct shm_desc *) kmalloc (sizeof(*shmd), GFP_KERNEL);
        if (!shmd)
                return -ENOMEM;
        if ((shp != shm_segs[id]) || (shp->shm_perm.seq != shmid / SHMMNI)) {
                kfree_s (shmd, sizeof (*shmd));
                return -EIDRM;
        }
        shmd->shm_sgn = (SHM_SWP_TYPE << 1) | (id << SHM_ID_SHIFT) |
                (shmflg & SHM_RDONLY ? SHM_READ_ONLY : 0);
        shmd->start = addr;
        shmd->end = addr + shp->shm_npages * PAGE_SIZE;
        shmd->task = current;

        shp->shm_nattch++;            /* prevent destruction */
        if (addr < current->end_data) {
                iput (current->executable);
                current->executable = NULL;
/*              current->end_data = current->end_code = 0; */
        }

        if ((err = shm_map (shmd, shmflg & SHM_REMAP))) {
                if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
                        killseg(id);
                kfree_s (shmd, sizeof (*shmd));
                return err;
        }

        shmd->task_next = current->shm;
        current->shm = shmd;
        shmd->seg_next = shp->attaches;
        shp->attaches = shmd;
        shp->shm_lpid = current->pid;
        shp->shm_atime = CURRENT_TIME;
        put_fs_long (addr, raddr);
        return 0;
}
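
User space reaches this through shmat(2), which returns the address the kernel wrote to raddr (or (void *) -1 on error). A sketch of the addressing rules enforced above; the fixed address is an arbitrary example:

#include <sys/ipc.h>
#include <sys/shm.h>

void shmat_demo (int id)
{
        /* NULL: the kernel picks a slot below all unspecified attaches */
        char *a = shmat (id, NULL, 0);

        /* an unaligned address with SHM_RND is rounded down to a SHMLBA
           boundary; without SHM_RND it would fail with -EINVAL */
        char *b = shmat (id, (void *) 0x40001234, SHM_RND);

        /* SHM_RDONLY maps the pages write-protected (SHM_READ_ONLY above) */
        char *c = shmat (id, NULL, SHM_RDONLY);

        (void) a; (void) b; (void) c;
}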

/*
 * remove the first attach descriptor from the list *shmdp.
 * free memory for segment if it is marked destroyed.
 * The descriptor is detached before the sleep in unmap_page_range.
 */
static void detach (struct shm_desc **shmdp)
{
        struct shm_desc *shmd = *shmdp;
        struct shmid_ds *shp;
        int id;

        id = (shmd->shm_sgn >> SHM_ID_SHIFT) & SHM_ID_MASK;
        shp = shm_segs[id];
        *shmdp = shmd->task_next;
        for (shmdp = &shp->attaches; *shmdp; shmdp = &(*shmdp)->seg_next)
                if (*shmdp == shmd) {
                        *shmdp = shmd->seg_next;
                        goto found;
                }
        printk("detach: shm segment (id=%d) attach list inconsistent\n", id);

 found:
        unmap_page_range (shmd->start, shp->shm_segsz); /* sleeps */
        kfree_s (shmd, sizeof (*shmd));
        shp->shm_lpid = current->pid;
        shp->shm_dtime = CURRENT_TIME;
        if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
                killseg (id); /* sleeps */
        return;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in detach.
 */
int sys_shmdt (char *shmaddr)
{
        struct shm_desc *shmd, **shmdp;

        for (shmdp = &current->shm; (shmd = *shmdp); shmdp=&shmd->task_next) {
                if (shmd->start == (ulong) shmaddr) {
                        detach (shmdp);
                        return 0;
                }
        }
        return -EINVAL;
}

/*
 * detach all attached segments.
 */
void shm_exit (void)
{
        while (current->shm)
                detach(&current->shm);
        return;
}

/*
 * Copy the parent's shm descriptors and update nattch.
 * The parent is stuck in fork, so an attach on each segment is assured.
 * copy_page_tables does the mapping.
 */
int shm_fork (struct task_struct *p1, struct task_struct *p2)
{
        struct shm_desc *shmd, *new_desc = NULL, *tmp;
        struct shmid_ds *shp;
        int id;

        if (!p1->shm)
                return 0;
        for (shmd = p1->shm; shmd; shmd = shmd->task_next) {
                tmp = (struct shm_desc *) kmalloc(sizeof(*tmp), GFP_KERNEL);
                if (!tmp) {
                        while (new_desc) {
                                tmp = new_desc->task_next;
                                kfree_s (new_desc, sizeof (*new_desc));
                                new_desc = tmp;
                        }
                        free_page_tables (p2);
                        return -ENOMEM;
                }
                *tmp = *shmd;
                tmp->task = p2;
                tmp->task_next = new_desc;
                new_desc = tmp;
        }
        p2->shm = new_desc;
        for (shmd = new_desc; shmd; shmd = shmd->task_next) {
                id = (shmd->shm_sgn >> SHM_ID_SHIFT) & SHM_ID_MASK;
                shp = shm_segs[id];
                if (shp == IPC_UNUSED) {
                        printk("shm_fork: unused id=%d PANIC\n", id);
                        return -ENOMEM;
                }
                shmd->seg_next = shp->attaches;
                shp->attaches = shmd;
                shp->shm_nattch++;
                shp->shm_atime = CURRENT_TIME;
                shp->shm_lpid = current->pid;
        }
        return 0;
}

/*
 * page not present ... go through shm_pages ... called from swap_in()
 */
void shm_no_page (unsigned long *ptent)
{
        unsigned long page;
        unsigned long code = *ptent;
        struct shmid_ds *shp;
        unsigned int id, idx;

        id = (code >> SHM_ID_SHIFT) & SHM_ID_MASK;
        if (id > max_shmid) {
                printk ("shm_no_page: id=%d too big. proc mem corrupted\n", id);
                return;
        }
        shp = shm_segs[id];
        if (shp == IPC_UNUSED || shp == IPC_NOID) {
                printk ("shm_no_page: id=%d invalid. Race.\n", id);
                return;
        }
        idx = (code >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
        if (idx >= shp->shm_npages) {
                printk ("shm_no_page: too large page index. id=%d\n", id);
                return;
        }

        if (!(shp->shm_pages[idx] & PAGE_PRESENT)) {
                if (!(page = __get_free_page(GFP_KERNEL))) {
                        oom(current);
                        *ptent = BAD_PAGE | PAGE_ACCESSED | 7;
                        return;
                }
                if (shp->shm_pages[idx] & PAGE_PRESENT) {
                        free_page (page);
                        goto done;
                }
                if (shp->shm_pages[idx]) {
                        read_swap_page (shp->shm_pages[idx], (char *) page);
                        if (shp->shm_pages[idx] & PAGE_PRESENT) {
                                free_page (page);
                                goto done;
                        }
                        swap_free (shp->shm_pages[idx]);
                        shm_swp--;
                }
                shm_rss++;
                shp->shm_pages[idx] = page | (PAGE_SHARED | PAGE_DIRTY);
        } else
                --current->maj_flt;  /* was incremented in do_no_page */

done:
        current->min_flt++;
        page = shp->shm_pages[idx];
        if (code & SHM_READ_ONLY)           /* write-protect */
                page &= ~2;
        mem_map[MAP_NR(page)]++;
        *ptent = page;
        return;
}
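
The non-present PTE value decoded above is the shm_sgn cookie planted by shm_map() and shm_swap(): a swap-type tag in the low bits, the segment id at SHM_ID_SHIFT, the page index at SHM_IDX_SHIFT, plus SHM_READ_ONLY for read-only attaches. A sketch of the round trip, using the same macros as the surrounding code:

        /* encode, as built by sys_shmat()/shm_map()/shm_swap() */
        ulong cookie = (SHM_SWP_TYPE << 1) | (id << SHM_ID_SHIFT)
                     | (idx << SHM_IDX_SHIFT);
        /* decode, as performed at the top of shm_no_page() */
        id  = (cookie >> SHM_ID_SHIFT)  & SHM_ID_MASK;
        idx = (cookie >> SHM_IDX_SHIFT) & SHM_IDX_MASK;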

/*
 * Goes through counter = (shm_rss >> prio) present shm pages.
 */
static unsigned long swap_id = 0; /* currently being swapped */
static unsigned long swap_idx = 0; /* next to swap */

int shm_swap (int prio)
{
        unsigned long page;
        struct shmid_ds *shp;
        struct shm_desc *shmd;
        unsigned int swap_nr;
        unsigned long id, idx, invalid = 0;
        int counter;

        counter = shm_rss >> prio;
        if (!counter || !(swap_nr = get_swap_page()))
                return 0;

 check_id:
        shp = shm_segs[swap_id];
        if (shp == IPC_UNUSED || shp == IPC_NOID || shp->shm_perm.mode & SHM_LOCKED) {
                swap_idx = 0;
                if (++swap_id > max_shmid)
                        swap_id = 0;
                goto check_id;
        }
        id = swap_id;

 check_table:
        idx = swap_idx++;
        if (idx >= shp->shm_npages) {
                swap_idx = 0;
                if (++swap_id > max_shmid)
                        swap_id = 0;
                goto check_id;
        }

        page = shp->shm_pages[idx];
        if (!(page & PAGE_PRESENT))
                goto check_table;
        swap_attempts++;

        if (--counter < 0) { /* failed */
                if (invalid)
                        invalidate();
                swap_free (swap_nr);
                return 0;
        }
        for (shmd = shp->attaches; shmd; shmd = shmd->seg_next) {
                unsigned long tmp, *pte;
                if ((shmd->shm_sgn >> SHM_ID_SHIFT & SHM_ID_MASK) != id) {
                        printk ("shm_swap: id=%ld does not match shmd\n", id);
                        continue;
                }
                tmp = shmd->start + (idx << PAGE_SHIFT);
                if (tmp >= shmd->end) {
                        printk ("shm_swap: too large idx=%ld id=%ld PANIC\n", idx, id);
                        continue;
                }
                pte = PAGE_DIR_OFFSET(shmd->task->tss.cr3,tmp);
                if (!(*pte & 1)) {
                        printk("shm_swap: bad pgtbl! id=%ld start=%lx idx=%ld\n",
                                        id, shmd->start, idx);
                        *pte = 0;
                        continue;
                }
                pte = (ulong *) (PAGE_MASK & *pte);
                pte += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
                tmp = *pte;
                if (!(tmp & PAGE_PRESENT))
                        continue;
                if (tmp & PAGE_ACCESSED) {
                        *pte &= ~PAGE_ACCESSED;
                        continue;
                }
                tmp = shmd->shm_sgn | idx << SHM_IDX_SHIFT;
                *pte = tmp;
                mem_map[MAP_NR(page)]--;
                shmd->task->rss--;
                invalid++;
        }

        if (mem_map[MAP_NR(page)] != 1)
                goto check_table;
        page &= PAGE_MASK;
        shp->shm_pages[idx] = swap_nr;
        if (invalid)
                invalidate();
        write_swap_page (swap_nr, (char *) page);
        free_page (page);
        swap_successes++;
        shm_swp++;
        shm_rss--;
        return 1;
}
