root/fs/proc/mem.c


DEFINITIONS

This source file includes the following definitions.
  1. check_range
  2. get_task
  3. mem_read
  4. mem_write
  5. mem_lseek
  6. mem_mmap

/*
 *  linux/fs/proc/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/pgtable.h>

/*
 * mem_write isn't really a good idea right now. It needs
 * to check a lot more: if the process we try to write to
 * dies in the middle of the write, mem_write will overwrite
 * kernel memory. This disables it altogether.
 */
#define mem_write NULL

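/*
 * check_range() verifies that the range starting at addr lies in a
 * readable vma of the target task, following contiguous readable
 * vmas as far as necessary.  It returns the number of bytes (at most
 * count) that may be read, or -EACCES if addr is not mapped readable.
 */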
static int check_range(struct task_struct * tsk, unsigned long addr, int count)
{
        struct vm_area_struct *vma;
        int retval;

        vma = find_vma(tsk, addr);
        if (!vma)
                return -EACCES;
        if (vma->vm_start > addr)
                return -EACCES;
        if (!(vma->vm_flags & VM_READ))
                return -EACCES;
        while ((retval = vma->vm_end - addr) < count) {
                struct vm_area_struct *next = vma->vm_next;
                if (!next)
                        break;
                if (vma->vm_end != next->vm_start)
                        break;
                if (!(next->vm_flags & VM_READ))
                        break;
                vma = next;
        }
        if (retval > count)
                retval = count;
        return retval;
}

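/*
 * get_task() maps a pid (taken from the high bits of the /proc inode
 * number, i_ino >> 16) to a task_struct.  The current task always
 * matches; any other task is returned only if it is being ptraced,
 * is stopped, and has the caller as its parent.
 */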
static struct task_struct * get_task(int pid)
{
        struct task_struct * tsk = current;

        if (pid != tsk->pid) {
                int i;
                tsk = NULL;
                for (i = 1 ; i < NR_TASKS ; i++)
                        if (task[i] && task[i]->pid == pid) {
                                tsk = task[i];
                                break;
                        }
                /*
                 * allow accesses only under the same circumstances
                 * that we would allow ptrace to work
                 */
                if (tsk) {
                        if (!(tsk->flags & PF_PTRACED)
                            || tsk->state != TASK_STOPPED
                            || tsk->p_pptr != current)
                                tsk = NULL;
                }
        }
        return tsk;
}

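/*
 * mem_read() implements read() on /proc/<pid>/mem.  It walks the
 * target's page tables one page at a time and copies whatever is
 * mapped out to user space, stopping at the first hole or pending
 * signal.  The file position is a virtual address in the target.
 *
 * A rough userspace sketch (assuming an ordinary ptrace-based
 * debugger flow; the "<pid>" path and variable names are purely
 * illustrative):
 *
 *      ptrace(PTRACE_ATTACH, pid, 0, 0);
 *      waitpid(pid, &status, 0);          // target enters TASK_STOPPED
 *      fd = open("/proc/<pid>/mem", O_RDONLY);
 *      lseek(fd, addr, SEEK_SET);         // addr = address in the target
 *      read(fd, buf, len);
 *
 * which is roughly the state get_task() above requires.
 */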
static int mem_read(struct inode * inode, struct file * file,char * buf, int count)
{
        pgd_t *page_dir;
        pmd_t *page_middle;
        pte_t pte;
        char * page;
        struct task_struct * tsk;
        unsigned long addr;
        char *tmp;
        int i;

        if (count < 0)
                return -EINVAL;
        tsk = get_task(inode->i_ino >> 16);
        if (!tsk)
                return -ESRCH;
        addr = file->f_pos;
        count = check_range(tsk, addr, count);
        if (count < 0)
                return count;
        tmp = buf;
        while (count > 0) {
                if (current->signal & ~current->blocked)
                        break;
                page_dir = pgd_offset(tsk->mm,addr);
                if (pgd_none(*page_dir))
                        break;
                if (pgd_bad(*page_dir)) {
                        printk("Bad page dir entry %08lx\n", pgd_val(*page_dir));
                        pgd_clear(page_dir);
                        break;
                }
                page_middle = pmd_offset(page_dir,addr);
                if (pmd_none(*page_middle))
                        break;
                if (pmd_bad(*page_middle)) {
                        printk("Bad page middle entry %08lx\n", pmd_val(*page_middle));
                        pmd_clear(page_middle);
                        break;
                }
                pte = *pte_offset(page_middle,addr);
                if (!pte_present(pte))
                        break;
                page = (char *) pte_page(pte) + (addr & ~PAGE_MASK);
                i = PAGE_SIZE-(addr & ~PAGE_MASK);
                if (i > count)
                        i = count;
                memcpy_tofs(tmp, page, i);
                addr += i;
                tmp += i;
                count -= i;
        }
        file->f_pos = addr;
        return tmp-buf;
}

#ifndef mem_write

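/*
 * mem_write() is the mirror image of mem_read(): it walks the target's
 * page tables and copies user data into any page that is present and
 * writable.  With mem_write #defined to NULL above, this block is
 * never compiled and the write entry in the file_operations table
 * below stays NULL.
 */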
static int mem_write(struct inode * inode, struct file * file,char * buf, int count)
{
        pgd_t *page_dir;
        pmd_t *page_middle;
        pte_t pte;
        char * page;
        struct task_struct * tsk;
        unsigned long addr;
        char *tmp;
        int i;

        if (count < 0)
                return -EINVAL;
        addr = file->f_pos;
        tsk = get_task(inode->i_ino >> 16);
        if (!tsk)
                return -ESRCH;
        tmp = buf;
        while (count > 0) {
                if (current->signal & ~current->blocked)
                        break;
                page_dir = pgd_offset(tsk->mm,addr);
                if (pgd_none(*page_dir))
                        break;
                if (pgd_bad(*page_dir)) {
                        printk("Bad page dir entry %08lx\n", pgd_val(*page_dir));
                        pgd_clear(page_dir);
                        break;
                }
                page_middle = pmd_offset(page_dir,addr);
                if (pmd_none(*page_middle))
                        break;
                if (pmd_bad(*page_middle)) {
                        printk("Bad page middle entry %08lx\n", pmd_val(*page_middle));
                        pmd_clear(page_middle);
                        break;
                }
                pte = *pte_offset(page_middle,addr);
                if (!pte_present(pte))
                        break;
                if (!pte_write(pte))
                        break;
                page = (char *) pte_page(pte) + (addr & ~PAGE_MASK);
                i = PAGE_SIZE-(addr & ~PAGE_MASK);
                if (i > count)
                        i = count;
                memcpy_fromfs(page, tmp, i);
                addr += i;
                tmp += i;
                count -= i;
        }
        file->f_pos = addr;
        if (tmp != buf)
                return tmp-buf;
        if (current->signal & ~current->blocked)
                return -ERESTARTSYS;
        return 0;
}

#endif

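/*
 * mem_lseek() supports SEEK_SET and SEEK_CUR only; the resulting file
 * position is interpreted as a virtual address in the target process.
 */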
static int mem_lseek(struct inode * inode, struct file * file, off_t offset, int orig)
{
        switch (orig) {
                case 0:
                        file->f_pos = offset;
                        return file->f_pos;
                case 1:
                        file->f_pos += offset;
                        return file->f_pos;
                default:
                        return -EINVAL;
        }
}

/*
 * This isn't really reliable by any means..
 */
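/*
 * mem_mmap() maps a range of another process's address space into the
 * caller's: it first checks that every source page is mapped (and not
 * shared memory), then copies the page table entries across, faulting
 * pages in and breaking copy-on-write where necessary, and raises the
 * reference count on each shared page.
 */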
int mem_mmap(struct inode * inode, struct file * file,
             struct vm_area_struct * vma)
{
        struct task_struct *tsk;
        pgd_t *src_dir, *dest_dir;
        pmd_t *src_middle, *dest_middle;
        pte_t *src_table, *dest_table;
        unsigned long stmp, dtmp;
        struct vm_area_struct *src_vma = NULL;

        /* Get the source's task information */

        tsk = get_task(inode->i_ino >> 16);

        if (!tsk)
                return -ESRCH;

        /* Ensure that we have a valid source area.  (Has to be mmap'ed and
           have valid page information.)  We can't map shared memory at the
           moment because working out the vm_area_struct & nattach stuff isn't
           worth it. */

        src_vma = tsk->mm->mmap;
        stmp = vma->vm_offset;
        while (stmp < vma->vm_offset + (vma->vm_end - vma->vm_start)) {
                while (src_vma && stmp > src_vma->vm_end)
                        src_vma = src_vma->vm_next;
                if (!src_vma || (src_vma->vm_flags & VM_SHM))
                        return -EINVAL;

                src_dir = pgd_offset(tsk->mm, stmp);
                if (pgd_none(*src_dir))
                        return -EINVAL;
                if (pgd_bad(*src_dir)) {
                        printk("Bad source page dir entry %08lx\n", pgd_val(*src_dir));
                        return -EINVAL;
                }
                src_middle = pmd_offset(src_dir, stmp);
                if (pmd_none(*src_middle))
                        return -EINVAL;
                if (pmd_bad(*src_middle)) {
                        printk("Bad source page middle entry %08lx\n", pmd_val(*src_middle));
                        return -EINVAL;
                }
                src_table = pte_offset(src_middle, stmp);
                if (pte_none(*src_table))
                        return -EINVAL;

                if (stmp < src_vma->vm_start) {
                        if (!(src_vma->vm_flags & VM_GROWSDOWN))
                                return -EINVAL;
                        if (src_vma->vm_end - stmp > current->rlim[RLIMIT_STACK].rlim_cur)
                                return -EINVAL;
                }
                stmp += PAGE_SIZE;
        }

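        /*
         * Second pass: walk the two address spaces in step and copy the
         * source page table entries into the caller's page tables, faulting
         * source pages in (and breaking copy-on-write for writable mappings)
         * as needed, and raising the reference count of each shared page.
         */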
        src_vma = tsk->mm->mmap;
        stmp    = vma->vm_offset;
        dtmp    = vma->vm_start;

        flush_cache_range(vma->vm_mm, vma->vm_start, vma->vm_end);
        flush_cache_range(src_vma->vm_mm, src_vma->vm_start, src_vma->vm_end);
        while (dtmp < vma->vm_end) {
                while (src_vma && stmp > src_vma->vm_end)
                        src_vma = src_vma->vm_next;

                src_dir = pgd_offset(tsk->mm, stmp);
                src_middle = pmd_offset(src_dir, stmp);
                src_table = pte_offset(src_middle, stmp);

                dest_dir = pgd_offset(current->mm, dtmp);
                dest_middle = pmd_alloc(dest_dir, dtmp);
                if (!dest_middle)
                        return -ENOMEM;
                dest_table = pte_alloc(dest_middle, dtmp);
                if (!dest_table)
                        return -ENOMEM;

                if (!pte_present(*src_table))
                        do_no_page(tsk, src_vma, stmp, 1);

                if ((vma->vm_flags & VM_WRITE) && !pte_write(*src_table))
                        do_wp_page(tsk, src_vma, stmp, 1);

                set_pte(src_table, pte_mkdirty(*src_table));
                set_pte(dest_table, *src_table);
                mem_map[MAP_NR(pte_page(*src_table))].count++;

                stmp += PAGE_SIZE;
                dtmp += PAGE_SIZE;
        }

        flush_tlb_range(vma->vm_mm, vma->vm_start, vma->vm_end);
        flush_tlb_range(src_vma->vm_mm, src_vma->vm_start, src_vma->vm_end);
        return 0;
}

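/*
 * File operations for /proc/<pid>/mem: positional reads and lseek work
 * as above, write is disabled (NULL via the #define at the top), and
 * mmap maps the target's pages into the caller's address space.
 */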
static struct file_operations proc_mem_operations = {
        mem_lseek,
        mem_read,
        mem_write,
        NULL,           /* mem_readdir */
        NULL,           /* mem_select */
        NULL,           /* mem_ioctl */
        mem_mmap,       /* mmap */
        NULL,           /* no special open code */
        NULL,           /* no special release code */
        NULL            /* can't fsync */
};

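/*
 * Inode operations for /proc/<pid>/mem: only the file operations above
 * are provided; everything else is left at its default.
 */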
struct inode_operations proc_mem_inode_operations = {
        &proc_mem_operations,   /* default file-ops */
        NULL,                   /* create */
        NULL,                   /* lookup */
        NULL,                   /* link */
        NULL,                   /* unlink */
        NULL,                   /* symlink */
        NULL,                   /* mkdir */
        NULL,                   /* rmdir */
        NULL,                   /* mknod */
        NULL,                   /* rename */
        NULL,                   /* readlink */
        NULL,                   /* follow_link */
        NULL,                   /* readpage */
        NULL,                   /* writepage */
        NULL,                   /* bmap */
        NULL,                   /* truncate */
        NULL                    /* permission */
};
