root/ipc/shm.c

DEFINITIONS

This source file includes the following definitions.
  1. shm_init
  2. findkey
  3. newseg
  4. sys_shmget
  5. killseg
  6. sys_shmctl
  7. shm_map
  8. sys_shmat
  9. detach
  10. sys_shmdt
  11. shm_exit
  12. shm_fork
  13. shm_no_page
  14. shm_swap

/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *         Many improvements/fixes by Bruno Haible.
 * assume user segments start at 0x0
 */

#include <linux/errno.h>
#include <asm/segment.h>
#include <linux/sched.h>
#include <linux/ipc.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/malloc.h>

extern int ipcperms (struct ipc_perm *ipcp, short semflg);
extern unsigned int get_swap_page(void);
static int findkey (key_t key);
static int newseg (key_t key, int shmflg, int size);
static int shm_map (struct shm_desc *shmd, int remap);
static void killseg (int id);

static int shm_tot = 0;  /* total number of shared memory pages */
static int shm_rss = 0; /* number of shared memory pages that are in memory */
static int shm_swp = 0; /* number of shared memory pages that are in swap */
static int max_shmid = 0; /* every used id is <= max_shmid */
static struct wait_queue *shm_lock = NULL;
static struct shmid_ds *shm_segs[SHMMNI];

static unsigned short shm_seq = 0; /* incremented, for recognizing stale ids */

/* some statistics */
static ulong swap_attempts = 0;
static ulong swap_successes = 0;
static ulong used_segs = 0;

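/*
 * Initialize the segment table: mark every slot unused and reset the
 * bookkeeping counters.
 */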
void shm_init (void)
{
        int id;

        for (id = 0; id < SHMMNI; id++)
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
        shm_tot = shm_rss = shm_seq = max_shmid = used_segs = 0;
        shm_lock = NULL;
        return;
}

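/*
 * Linear scan of the segment table for a key.  Sleeps while a slot is
 * still being set up (IPC_NOID), so a half-initialized segment is never
 * returned.
 */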
static int findkey (key_t key)
{
        int id;
        struct shmid_ds *shp;

        for (id = 0; id <= max_shmid; id++) {
                while ((shp = shm_segs[id]) == IPC_NOID)
                        sleep_on (&shm_lock);
                if (shp == IPC_UNUSED)
                        continue;
                if (key == shp->shm_perm.key)
                        return id;
        }
        return -1;
}

/*
 * allocate new shmid_ds and pgtable. protected by shm_segs[id] = NOID.
 */
static int newseg (key_t key, int shmflg, int size)
{
        struct shmid_ds *shp;
        int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int id, i;

        if (size < SHMMIN)
                return -EINVAL;
        if (shm_tot + numpages >= SHMALL)
                return -ENOSPC;
        for (id = 0; id < SHMMNI; id++)
                if (shm_segs[id] == IPC_UNUSED) {
                        shm_segs[id] = (struct shmid_ds *) IPC_NOID;
                        goto found;
                }
        return -ENOSPC;

found:
        shp = (struct shmid_ds *) kmalloc (sizeof (*shp), GFP_KERNEL);
        if (!shp) {
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
                if (shm_lock)
                        wake_up (&shm_lock);
                return -ENOMEM;
        }

        shp->shm_pages = (ulong *) kmalloc (numpages*sizeof(ulong), GFP_KERNEL);
        if (!shp->shm_pages) {
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
                if (shm_lock)
                        wake_up (&shm_lock);
                kfree_s (shp, sizeof (*shp));
                return -ENOMEM;
        }

        for (i = 0; i < numpages; shp->shm_pages[i++] = 0);
        shm_tot += numpages;
        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->shm_perm.cuid = shp->shm_perm.uid = current->euid;
        shp->shm_perm.cgid = shp->shm_perm.gid = current->egid;
        shp->shm_perm.seq = shm_seq;
        shp->shm_segsz = size;
        shp->shm_cpid = current->pid;
        shp->attaches = NULL;
        shp->shm_lpid = shp->shm_nattch = 0;
        shp->shm_atime = shp->shm_dtime = 0;
        shp->shm_ctime = CURRENT_TIME;
        shp->shm_npages = numpages;

        if (id > max_shmid)
                max_shmid = id;
        shm_segs[id] = shp;
        used_segs++;
        if (shm_lock)
                wake_up (&shm_lock);
        return id + (int) shm_seq * SHMMNI;
}

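/*
 * Entry point for shmget(2): return the id of an existing segment that
 * matches key, or create a new one via newseg().  The id handed to
 * userland is seq*SHMMNI + slot, so ids of destroyed segments go stale.
 */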
int sys_shmget (key_t key, int size, int shmflg)
{
        struct shmid_ds *shp;
        int id = 0;

        if (size < 0 || size > SHMMAX)
                return -EINVAL;
        if (key == IPC_PRIVATE)
                return newseg(key, shmflg, size);
        if ((id = findkey (key)) == -1) {
                if (!(shmflg & IPC_CREAT))
                        return -ENOENT;
                return newseg(key, shmflg, size);
        }
        if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL))
                return -EEXIST;
        shp = shm_segs[id];
        if (shp->shm_perm.mode & SHM_DEST)
                return -EIDRM;
        if (size > shp->shm_segsz)
                return -EINVAL;
        if (ipcperms (&shp->shm_perm, shmflg))
                return -EACCES;
        return shp->shm_perm.seq*SHMMNI + id;
}

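/*
 * Worked example of the id encoding (taking SHMMNI as 256 purely for
 * illustration): a segment created in slot 3 while shm_seq == 2 gets id
 * 2*256 + 3 = 515.  killseg() increments both shm_seq and the per-slot
 * seq, so a later lookup of the stale id fails the seq == shmid/SHMMNI
 * check and returns -EIDRM.
 */
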
/*
 * Only called after testing nattch and SHM_DEST.
 * Here pages, pgtable and shmid_ds are freed.
 */
static void killseg (int id)
{
        struct shmid_ds *shp;
        int i, numpages;
        ulong page;

        shp = shm_segs[id];
        if (shp == IPC_NOID || shp == IPC_UNUSED) {
                printk ("shm nono: killseg called on unused seg id=%d\n", id);
                return;
        }
        shp->shm_perm.seq++;     /* for shmat */
        numpages = shp->shm_npages;
        shm_seq++;
        shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
        used_segs--;
        if (id == max_shmid)
                while (max_shmid && (shm_segs[--max_shmid] == IPC_UNUSED));
        if (!shp->shm_pages) {
                printk ("shm nono: killseg shp->pages=NULL. id=%d\n", id);
                return;
        }
        for (i = 0; i < numpages; i++) {
                if (!(page = shp->shm_pages[i]))
                        continue;
                if (page & 1) {
                        free_page (page & PAGE_MASK);
                        shm_rss--;
                } else {
                        swap_free (page);
                        shm_swp--;
                }
        }
        kfree_s (shp->shm_pages, numpages * sizeof (ulong));
        shm_tot -= numpages;
        kfree_s (shp, sizeof (*shp));
        return;
}

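/*
 * Entry point for shmctl(2).  IPC_INFO, SHM_INFO and SHM_STAT are
 * handled first since they do not name a particular segment; all other
 * commands decode shmid into a slot (shmid % SHMMNI) and check the
 * sequence number before acting on it.
 */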
int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
{
        struct shmid_ds *shp, tbuf;
        struct ipc_perm *ipcp;
        int id, err;

        if (cmd < 0 || shmid < 0)
                return -EINVAL;
        if (cmd == IPC_SET) {
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_READ, buf, sizeof (*buf));
                if (err)
                        return err;
                memcpy_fromfs (&tbuf, buf, sizeof (*buf));
        }

        switch (cmd) { /* replace with proc interface ? */
        case IPC_INFO:
        {
                struct shminfo shminfo;
                if (!buf)
                        return -EFAULT;
                shminfo.shmmni = SHMMNI;
                shminfo.shmmax = SHMMAX;
                shminfo.shmmin = SHMMIN;
                shminfo.shmall = SHMALL;
                shminfo.shmseg = SHMSEG;
                err = verify_area (VERIFY_WRITE, buf, sizeof (struct shminfo));
                if (err)
                        return err;
                memcpy_tofs (buf, &shminfo, sizeof(struct shminfo));
                return max_shmid;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (shm_info));
                if (err)
                        return err;
                shm_info.used_ids = used_segs;
                shm_info.shm_rss = shm_rss;
                shm_info.shm_tot = shm_tot;
                shm_info.shm_swp = shm_swp;
                shm_info.swap_attempts = swap_attempts;
                shm_info.swap_successes = swap_successes;
                memcpy_tofs (buf, &shm_info, sizeof(shm_info));
                return max_shmid;
        }
        case SHM_STAT:
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (*shp));
                if (err)
                        return err;
                if (shmid > max_shmid)
                        return -EINVAL;
                shp = shm_segs[shmid];
                if (shp == IPC_UNUSED || shp == IPC_NOID)
                        return -EINVAL;
                if (ipcperms (&shp->shm_perm, S_IRUGO))
                        return -EACCES;
                id = shmid + shp->shm_perm.seq * SHMMNI;
                memcpy_tofs (buf, shp, sizeof(*shp));
                return id;
        }

        shp = shm_segs[id = shmid % SHMMNI];
        if (shp == IPC_UNUSED || shp == IPC_NOID)
                return -EINVAL;
        ipcp = &shp->shm_perm;
        if (ipcp->seq != shmid / SHMMNI)
                return -EIDRM;

        switch (cmd) {
        case SHM_UNLOCK:
                if (!suser())
                        return -EPERM;
                if (!(ipcp->mode & SHM_LOCKED))
                        return -EINVAL;
                ipcp->mode &= ~SHM_LOCKED;
                break;
        case SHM_LOCK:
/* Allow superuser to lock segment in memory */
/* Should the pages be faulted in here or leave it to user? */
/* need to determine interaction with current->swappable */
                if (!suser())
                        return -EPERM;
                if (ipcp->mode & SHM_LOCKED)
                        return -EINVAL;
                ipcp->mode |= SHM_LOCKED;
                break;
        case IPC_STAT:
                if (ipcperms (ipcp, S_IRUGO))
                        return -EACCES;
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (*shp));
                if (err)
                        return err;
                memcpy_tofs (buf, shp, sizeof(*shp));
                break;
        case IPC_SET:
                if (suser() || current->euid == shp->shm_perm.uid ||
                    current->euid == shp->shm_perm.cuid) {
                        ipcp->uid = tbuf.shm_perm.uid;
                        ipcp->gid = tbuf.shm_perm.gid;
                        ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
                                | (tbuf.shm_perm.mode & S_IRWXUGO);
                        shp->shm_ctime = CURRENT_TIME;
                        break;
                }
                return -EPERM;
        case IPC_RMID:
                if (suser() || current->euid == shp->shm_perm.uid ||
                    current->euid == shp->shm_perm.cuid) {
                        shp->shm_perm.mode |= SHM_DEST;
                        if (shp->shm_nattch <= 0)
                                killseg (id);
                        break;
                }
                return -EPERM;
        default:
                return -EINVAL;
        }
        return 0;
}

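/*
 * An attached-but-absent page is marked by a "signature" page table
 * entry: the present bit is clear and the word encodes SHM_SWP_TYPE,
 * the segment id (at SHM_ID_SHIFT) and the page index within the
 * segment (at SHM_IDX_SHIFT), plus SHM_READ_ONLY for read-only
 * attaches.  shm_map() installs these signatures, shm_no_page()
 * decodes them on fault, and shm_swap() writes them back when it
 * evicts a page.
 */
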
/*
 * check that the range is unmapped and that page tables exist, then
 * mark the page table entries with shm_sgn.
 * if remap != 0 the range is remapped.
 */
static int shm_map (struct shm_desc *shmd, int remap)
{
        unsigned long invalid = 0;
        unsigned long *page_table;
        unsigned long tmp, shm_sgn;
        unsigned long page_dir = shmd->task->tss.cr3;

        /* check that the range is unmapped and has page_tables */
        for (tmp = shmd->start; tmp < shmd->end; tmp += PAGE_SIZE) {
                page_table = PAGE_DIR_OFFSET(page_dir,tmp);
                if (*page_table & PAGE_PRESENT) {
                        page_table = (ulong *) (PAGE_MASK & *page_table);
                        page_table += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
                        if (*page_table) {
                                if (!remap)
                                        return -EINVAL;
                                if (*page_table & PAGE_PRESENT) {
                                        --current->rss;
                                        free_page (*page_table & PAGE_MASK);
                                } else
                                        swap_free (*page_table);
                                invalid++;
                        }
                        continue;
                }
                {
                        unsigned long new_pt;
                        if (!(new_pt = get_free_page(GFP_KERNEL))) /* clearing needed?  SRB. */
                                return -ENOMEM;
                        *page_table = new_pt | PAGE_TABLE;
                        tmp |= ((PAGE_SIZE << 10) - PAGE_SIZE);
                }
        }
        if (invalid)
                invalidate();

        /* map page range */
        shm_sgn = shmd->shm_sgn;
        for (tmp = shmd->start; tmp < shmd->end; tmp += PAGE_SIZE,
             shm_sgn += (1 << SHM_IDX_SHIFT)) {
                page_table = PAGE_DIR_OFFSET(page_dir,tmp);
                page_table = (ulong *) (PAGE_MASK & *page_table);
                page_table += (tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
                *page_table = shm_sgn;
        }
        return 0;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 * raddr is needed to return addresses above 2Gig.
 * Specific attaches are allowed over the executable....
 */
int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
{
        struct shmid_ds *shp;
        struct shm_desc *shmd;
        int err;
        unsigned int id;
        unsigned long addr;

        if (shmid < 0)
                return -EINVAL;

        if (raddr) {
                err = verify_area(VERIFY_WRITE, raddr, sizeof(long));
                if (err)
                        return err;
        }

        shp = shm_segs[id = shmid % SHMMNI];
        if (shp == IPC_UNUSED || shp == IPC_NOID)
                return -EINVAL;

        if (!(addr = (ulong) shmaddr)) {
                if (shmflg & SHM_REMAP)
                        return -EINVAL;
                /* set addr below all current unspecified attaches */
                addr = SHM_RANGE_END;
                for (shmd = current->shm; shmd; shmd = shmd->task_next) {
                        if (shmd->start < SHM_RANGE_START)
                                continue;
                        if (addr >= shmd->start)
                                addr = shmd->start;
                }
                addr = (addr - shp->shm_segsz) & PAGE_MASK;
        } else if (addr & (SHMLBA-1)) {
                if (shmflg & SHM_RND)
                        addr &= ~(SHMLBA-1);       /* round down */
                else
                        return -EINVAL;
        }
        if (addr > current->start_stack - 16384 - PAGE_SIZE*shp->shm_npages)
                return -EINVAL;
        if (shmflg & SHM_REMAP)
                for (shmd = current->shm; shmd; shmd = shmd->task_next) {
                        if (addr >= shmd->start && addr < shmd->end)
                                return -EINVAL;
                        if (addr + shp->shm_segsz >= shmd->start &&
                            addr + shp->shm_segsz < shmd->end)
                                return -EINVAL;
                }

        if (ipcperms(&shp->shm_perm, shmflg & SHM_RDONLY ? S_IRUGO : S_IRUGO|S_IWUGO))
                return -EACCES;
        if (shp->shm_perm.seq != shmid / SHMMNI)
                return -EIDRM;

        shmd = (struct shm_desc *) kmalloc (sizeof(*shmd), GFP_KERNEL);
        if (!shmd)
                return -ENOMEM;
        if ((shp != shm_segs[id]) || (shp->shm_perm.seq != shmid / SHMMNI)) {
                kfree_s (shmd, sizeof (*shmd));
                return -EIDRM;
        }
        shmd->shm_sgn = (SHM_SWP_TYPE << 1) | (id << SHM_ID_SHIFT) |
                (shmflg & SHM_RDONLY ? SHM_READ_ONLY : 0);
        shmd->start = addr;
        shmd->end = addr + shp->shm_npages * PAGE_SIZE;
        shmd->task = current;

        shp->shm_nattch++;            /* prevent destruction */
        if (addr < current->end_data) {
                iput (current->executable);
                current->executable = NULL;
/*              current->end_data = current->end_code = 0; */
        }

        if ((err = shm_map (shmd, shmflg & SHM_REMAP))) {
                if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
                        killseg(id);
                kfree_s (shmd, sizeof (*shmd));
                return err;
        }

        shmd->task_next = current->shm;
        current->shm = shmd;
        shmd->seg_next = shp->attaches;
        shp->attaches = shmd;
        shp->shm_lpid = current->pid;
        shp->shm_atime = CURRENT_TIME;
        if (!raddr)
                return addr;
        put_fs_long (addr, raddr);
        return 0;
}

/*
 * remove the first attach descriptor from the list *shmdp.
 * free memory for segment if it is marked destroyed.
 * The descriptor is detached before the sleep in unmap_page_range.
 */
static void detach (struct shm_desc **shmdp)
{
        struct shm_desc *shmd = *shmdp;
        struct shmid_ds *shp;
        int id;

        id = (shmd->shm_sgn >> SHM_ID_SHIFT) & SHM_ID_MASK;
        shp = shm_segs[id];
        *shmdp = shmd->task_next;
        for (shmdp = &shp->attaches; *shmdp; shmdp = &(*shmdp)->seg_next)
                if (*shmdp == shmd) {
                        *shmdp = shmd->seg_next;
                        goto found;
                }
        printk ("detach: shm segment (id=%d) attach list inconsistent\n", id);

 found:
        unmap_page_range (shmd->start, shp->shm_segsz); /* sleeps */
        kfree_s (shmd, sizeof (*shmd));
        shp->shm_lpid = current->pid;
        shp->shm_dtime = CURRENT_TIME;
        if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
                killseg (id); /* sleeps */
        return;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in detach.
 */
int sys_shmdt (char *shmaddr)
{
        struct shm_desc *shmd, **shmdp;

        for (shmdp = &current->shm; (shmd = *shmdp); shmdp = &shmd->task_next) {
                if (shmd->start == (ulong) shmaddr) {
                        detach (shmdp);
                        return 0;
                }
        }
        return -EINVAL;
}

/*
 * detach all attached segments.
 */
void shm_exit (void)
{
        while (current->shm)
                detach (&current->shm);
        return;
}

/*
 * copy the parent shm descriptors and update nattch
 * parent is stuck in fork so an attach on each segment is assured.
 * copy_page_tables does the mapping.
 */
int shm_fork (struct task_struct *p1, struct task_struct *p2)
{
        struct shm_desc *shmd, *new_desc = NULL, *tmp;
        struct shmid_ds *shp;
        int id;

        if (!p1->shm)
                return 0;
        for (shmd = p1->shm; shmd; shmd = shmd->task_next) {
                tmp = (struct shm_desc *) kmalloc(sizeof(*tmp), GFP_KERNEL);
                if (!tmp) {
                        while (new_desc) {
                                tmp = new_desc->task_next;
                                kfree_s (new_desc, sizeof (*new_desc));
                                new_desc = tmp;
                        }
                        free_page_tables (p2);
                        return -ENOMEM;
                }
                *tmp = *shmd;
                tmp->task = p2;
                tmp->task_next = new_desc;
                new_desc = tmp;
        }
        p2->shm = new_desc;
        for (shmd = new_desc; shmd; shmd = shmd->task_next) {
                id = (shmd->shm_sgn >> SHM_ID_SHIFT) & SHM_ID_MASK;
                shp = shm_segs[id];
                if (shp == IPC_UNUSED) {
                        printk ("shm_fork: unused id=%d PANIC\n", id);
                        return -ENOMEM;
                }
                shmd->seg_next = shp->attaches;
                shp->attaches = shmd;
                shp->shm_nattch++;
                shp->shm_atime = CURRENT_TIME;
                shp->shm_lpid = current->pid;
        }
        return 0;
}

/*
 * page not present ... go through shm_pages; called from swap_in()
 * when a page carrying a shm signature is touched.  get_free_page()
 * and read_swap_page() may sleep, so the entry is rechecked after each.
 */
void shm_no_page (unsigned long *ptent)
{
        unsigned long page;
        unsigned long code = *ptent;
        struct shmid_ds *shp;
        unsigned int id, idx;

        id = (code >> SHM_ID_SHIFT) & SHM_ID_MASK;
        if (id > max_shmid) {
                printk ("shm_no_page: id=%d too big. proc mem corrupted\n", id);
                return;
        }
        shp = shm_segs[id];
        if (shp == IPC_UNUSED || shp == IPC_NOID) {
                printk ("shm_no_page: id=%d invalid. Race.\n", id);
                return;
        }
        idx = (code >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
        if (idx >= shp->shm_npages) {
                printk ("shm_no_page: too large page index. id=%d\n", id);
                return;
        }

        if (!(shp->shm_pages[idx] & PAGE_PRESENT)) {
                if (!(page = get_free_page(GFP_KERNEL))) {
                        oom(current);
                        *ptent = BAD_PAGE | PAGE_ACCESSED | 7;
                        return;
                }
                if (shp->shm_pages[idx] & PAGE_PRESENT) {
                        free_page (page);
                        goto done;
                }
                if (shp->shm_pages[idx]) {
                        read_swap_page (shp->shm_pages[idx], (char *) page);
                        if (shp->shm_pages[idx] & PAGE_PRESENT) {
                                free_page (page);
                                goto done;
                        }
                        swap_free (shp->shm_pages[idx]);
                        shm_swp--;
                }
                shm_rss++;
                shp->shm_pages[idx] = page | (PAGE_SHARED | PAGE_DIRTY);
        } else
                --current->maj_flt;  /* was incremented in do_no_page */

done:
        current->min_flt++;
        page = shp->shm_pages[idx];
        if (code & SHM_READ_ONLY)           /* write-protect */
                page &= ~2;
        mem_map[MAP_NR(page)]++;
        *ptent = page;
        return;
}

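/*
 * Round-robin evictor: (swap_id, swap_idx) persist across calls and
 * remember the scan position.  A page is only written to swap once
 * every mapping PTE has been replaced by its signature and the mem_map
 * reference count has dropped to 1; PAGE_ACCESSED gives each recently
 * used page a second chance.
 */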
/*
 * Goes through counter = (shm_rss >> prio) present shm pages.
 */
static unsigned long swap_id = 0; /* currently being swapped */
static unsigned long swap_idx = 0; /* next to swap */

int shm_swap (int prio)
{
        unsigned long page;
        struct shmid_ds *shp;
        struct shm_desc *shmd;
        unsigned int swap_nr;
        unsigned long id, idx, invalid = 0;
        int counter;

        counter = shm_rss >> prio;
        if (!counter || !(swap_nr = get_swap_page()))
                return 0;

 check_id:
        shp = shm_segs[swap_id];
        if (shp == IPC_UNUSED || shp == IPC_NOID || shp->shm_perm.mode & SHM_LOCKED) {
                swap_idx = 0;
                if (++swap_id > max_shmid)
                        swap_id = 0;
                goto check_id;
        }
        id = swap_id;

 check_table:
        idx = swap_idx++;
        if (idx >= shp->shm_npages) {
                swap_idx = 0;
                if (++swap_id > max_shmid)
                        swap_id = 0;
                goto check_id;
        }

        page = shp->shm_pages[idx];
        if (!(page & PAGE_PRESENT))
                goto check_table;
        swap_attempts++;

        if (--counter < 0) { /* failed */
                if (invalid)
                        invalidate();
                swap_free (swap_nr);
                return 0;
        }
        for (shmd = shp->attaches; shmd; shmd = shmd->seg_next) {
                unsigned long tmp, *pte;
                if (((shmd->shm_sgn >> SHM_ID_SHIFT) & SHM_ID_MASK) != id) {
                        printk ("shm_swap: id=%ld does not match shmd\n", id);
                        continue;
                }
                tmp = shmd->start + (idx << PAGE_SHIFT);
                if (tmp >= shmd->end) {
                        printk ("shm_swap: too large idx=%ld id=%ld PANIC\n", idx, id);
                        continue;
                }
                pte = PAGE_DIR_OFFSET(shmd->task->tss.cr3,tmp);
                if (!(*pte & 1)) {
                        printk ("shm_swap: bad pgtbl! id=%ld start=%lx idx=%ld\n",
                                        id, shmd->start, idx);
                        *pte = 0;
                        continue;
                }
                pte = (ulong *) (PAGE_MASK & *pte);
                pte += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
                tmp = *pte;
                if (!(tmp & PAGE_PRESENT))
                        continue;
                if (tmp & PAGE_ACCESSED) {
                        *pte &= ~PAGE_ACCESSED;
                        continue;
                }
                tmp = shmd->shm_sgn | idx << SHM_IDX_SHIFT;
                *pte = tmp;
                mem_map[MAP_NR(page)]--;
                shmd->task->rss--;
                invalid++;
        }

        if (mem_map[MAP_NR(page)] != 1)
                goto check_table;
        page &= PAGE_MASK;
        shp->shm_pages[idx] = swap_nr;
        if (invalid)
                invalidate();
        write_swap_page (swap_nr, (char *) page);
        free_page (page);
        swap_successes++;
        shm_swp++;
        shm_rss--;
        return 1;
}