This source file includes the following definitions.
- __bad_pagetable
- __bad_page
- show_mem
- kernel_page_table
- map_chunk
- paging_init
- mem_init
- si_meminfo
1
2
3
4
5
6
7 #include <linux/config.h>
8 #include <linux/signal.h>
9 #include <linux/sched.h>
10 #include <linux/mm.h>
11 #include <linux/swap.h>
12 #include <linux/kernel.h>
13 #include <linux/string.h>
14 #include <linux/types.h>
15 #ifdef CONFIG_BLK_DEV_INITRD
16 #include <linux/blk.h>
17 #endif
18
19 #include <asm/segment.h>
20 #include <asm/page.h>
21 #include <asm/pgtable.h>
22 #include <asm/system.h>
23 #include <asm/bootinfo.h>
24 #include <asm/machdep.h>
25
26 extern void die_if_kernel(char *,struct pt_regs *,long);
27 extern void show_net_buffers(void);
28 extern unsigned long mm_phys_to_virt (unsigned long addr);
29 extern char *rd_start;
30 extern int rd_doload;
31
32 unsigned long ramdisk_length;
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47 static unsigned long empty_bad_page_table;
48
49 pte_t *__bad_pagetable(void)
50 {
51 memset((void *)empty_bad_page_table, 0, PAGE_SIZE);
52 return (pte_t *)empty_bad_page_table;
53 }
54
55 static unsigned long empty_bad_page;
56
57 pte_t __bad_page(void)
58 {
59 memset ((void *)empty_bad_page, 0, PAGE_SIZE);
60 return pte_mkdirty(mk_pte(empty_bad_page, PAGE_SHARED));
61 }
62
63 unsigned long empty_zero_page;
64
65 void show_mem(void)
66 {
67 unsigned long i;
68 int free = 0, total = 0, reserved = 0, nonshared = 0, shared = 0;
69
70 printk("\nMem-info:\n");
71 show_free_areas();
72 printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
73 i = high_memory >> PAGE_SHIFT;
74 while (i-- > 0) {
75 total++;
76 if (PageReserved(mem_map+i))
77 reserved++;
78 else if (!mem_map[i].count)
79 free++;
80 else if (mem_map[i].count == 1)
81 nonshared++;
82 else
83 shared += mem_map[i].count-1;
84 }
85 printk("%d pages of RAM\n",total);
86 printk("%d free pages\n",free);
87 printk("%d reserved pages\n",reserved);
88 printk("%d pages nonshared\n",nonshared);
89 printk("%d pages shared\n",shared);
90 show_buffers();
91 #ifdef CONFIG_NET
92 show_net_buffers();
93 #endif
94 }
95
96 #if 0
97
98
99
100
101
102 unsigned long mm_cachebits;
103 #endif
104
105 pte_t *kernel_page_table (unsigned long *memavailp)
106 {
107 pte_t *ptablep;
108
109 ptablep = (pte_t *)*memavailp;
110 *memavailp += PAGE_SIZE;
111
112 nocache_page ((unsigned long)ptablep);
113
114 return ptablep;
115 }
116
/*
 * map_chunk() - build kernel mappings for one chunk of physical RAM.
 *
 * Maps the physical range [addr, addr+size) into the kernel virtual
 * address space, drawing page-table pages from *memavailp (via
 * kernel_page_table()) and pointer tables from get_kpointer_table().
 * The static variables mem_mapped/virtaddr/ktablep persist across
 * calls, so successive chunks are mapped at successive virtual
 * addresses.
 *
 * Returns the running total of memory mapped so far (mem_mapped).
 */
static unsigned long map_chunk (unsigned long addr,
				unsigned long size,
				unsigned long *memavailp)
{
#define ONEMEG (1024*1024)
#define L3TREESIZE (256*1024)	/* one pointer-table entry covers 256K (64 pages) */

	int is040 = m68k_is040or060;
	static unsigned long mem_mapped = 0;	/* total bytes mapped so far */
	static unsigned long virtaddr = 0;	/* next virtual address to map */
	static pte_t *ktablep = NULL;		/* current page table being filled */
	unsigned long *kpointerp;
	unsigned long physaddr;
	extern pte_t *kpt;
	int pindex;				/* index into the pointer table */
	pgd_t *page_dir = pgd_offset_k (virtaddr);

	if (!pgd_present (*page_dir)) {
		/* no pointer (pmd) table yet for this root entry: allocate
		 * a fresh one, hook it in, and clear it */
		kpointerp = (unsigned long *) get_kpointer_table ();
		pgd_set (page_dir, (pmd_t *) kpointerp);
		memset (kpointerp, 0, PTRS_PER_PMD * sizeof (pmd_t));
	}
	else
		kpointerp = (unsigned long *) pgd_page (*page_dir);

	/*
	 * pindex is the offset into the pointer table of the descriptor
	 * for the current virtual address (each entry covers 256K, so
	 * bits 18.. select the slot; masked to the 128-entry table).
	 */
	pindex = (virtaddr >> 18) & 0x7f;

#ifdef DEBUG
	printk ("mm=%ld, kernel_pg_dir=%p, kpointerp=%p, pindex=%d\n",
		mem_mapped, kernel_pg_dir, kpointerp, pindex);
#endif

	/*
	 * On the very first call on a '040/'060, continue filling the
	 * boot-time page table kpt.
	 * NOTE(review): assumes kpt points at the partially-used table
	 * set up by the early boot code -- confirm against head.S.
	 */
	if (is040 && mem_mapped == 0)
		ktablep = kpt;

	/* map the chunk 256K (one pointer-table entry) at a time */
	for (physaddr = addr;
	     physaddr < addr + size;
	     mem_mapped += L3TREESIZE, virtaddr += L3TREESIZE) {

#ifdef DEBUG
		printk ("pa=%#lx va=%#lx ", physaddr, virtaddr);
#endif

		if (pindex > 127 && mem_mapped >= 32*ONEMEG) {
			/* current pointer table is full (128 entries = 32M):
			 * start a new one for the next root-table slot */
#ifdef DEBUG
			printk ("[new pointer]");
#endif
			kpointerp = (unsigned long *)get_kpointer_table ();
			pgd_set(pgd_offset_k(virtaddr), (pmd_t *)kpointerp);
			pindex = 0;
		}

		if (is040) {
			int i;
			unsigned long ktable;

			/*
			 * Skip ranges already mapped by the boot code
			 * (the check and "Already initialized" path imply
			 * the first 4M are pre-mapped -- presumably by
			 * head.S; verify).
			 */
			if (mem_mapped < 4 * ONEMEG)
			{
#ifdef DEBUG
				printk ("Already initialized\n");
#endif
				physaddr += L3TREESIZE;
				pindex++;
				continue;
			}
#ifdef DEBUG
			printk ("[setup table]");
#endif

			/*
			 * 16 pointer-table entries share one page-sized
			 * page table (16 * 64 ptes); allocate a fresh table
			 * page whenever the current one is exhausted.
			 */
			if ((pindex & 15) == 0) {
#ifdef DEBUG
				printk ("[new table]");
#endif
				ktablep = kernel_page_table (memavailp);
			}

			/* physical address of the table section to install */
			ktable = VTOP(ktablep);

			/*
			 * fill in the 64 ptes mapping this 256K piece,
			 * cacheable and global on the '040.
			 */
			for (i = 0; i < 64; i++) {
				pte_val(ktablep[i]) = physaddr | _PAGE_PRESENT
					| _PAGE_CACHE040 | _PAGE_GLOBAL040;
				physaddr += PAGE_SIZE;
			}
			ktablep += 64;

			/*
			 * point the pointer-table slot at the page table
			 * section just filled in.
			 */
			kpointerp[pindex++] = ktable | _PAGE_TABLE;
		} else {
			/*
			 * '020/'030 path: use an early-termination
			 * descriptor where possible, so one pointer-table
			 * entry maps the whole 256K with no page table.
			 */
#ifdef DEBUG
			printk ("[early term] ");
#endif
			if (virtaddr == 0UL) {
				/*
				 * The first 256K gets a real page table so
				 * that entry 0 can be cleared below, leaving
				 * the zero page unmapped (presumably to trap
				 * NULL dereferences -- confirm).
				 */
				int i;
				unsigned long *tbl;

				tbl = (unsigned long *)get_kpointer_table();

				kpointerp[pindex++] = VTOP(tbl) | _PAGE_TABLE;

				for (i = 0; i < 64; i++, physaddr += PAGE_SIZE)
					tbl[i] = physaddr | _PAGE_PRESENT;

				/* unmap the zero page */
				tbl[0] = 0;
			} else {
				/* early termination: map 256K in one descriptor */
				kpointerp[pindex++] = physaddr | _PAGE_PRESENT;
#ifdef DEBUG
				printk ("%lx=%lx ", VTOP(&kpointerp[pindex-1]),
					kpointerp[pindex-1]);
#endif
				physaddr += 64 * PAGE_SIZE;
			}
		}
#ifdef DEBUG
		printk ("\n");
#endif
	}

	return mem_mapped;
}
281
282 extern unsigned long free_area_init(unsigned long, unsigned long);
283
284 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
285
286
287
288
289
290
291
/*
 * paging_init() - set up the kernel page tables.
 *
 * Maps every physical memory chunk listed in boot_info, reserves the
 * bad-page-table / bad-page / zero pages, installs swapper_pg_dir as
 * task 0's page directory, loads the MMU root pointer, and (when the
 * initrd driver is not configured) relocates a boot-loader supplied
 * ramdisk image above the kernel.
 *
 * Returns the result of free_area_init(), i.e. the new start_mem
 * after the free-area bookkeeping has been allocated.
 */
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
	int chunk;
	unsigned long mem_avail = 0;
	/* boot-time allocation cursor; map_chunk() advances it as it
	 * consumes pages for page tables */
	extern unsigned long availmem;

#ifdef DEBUG
	{
		extern pte_t *kpt;
		printk ("start of paging_init (%p, %p, %lx, %lx, %lx)\n",
			kernel_pg_dir, kpt, availmem, start_mem, end_mem);
	}
#endif

#if 0
	/* (disabled) cache-bit and protection-map initialization */
	mm_cachebits = m68k_is040or060 ? _PAGE_CACHE040 : 0;

	protection_map[0] = PAGE_READONLY;
	protection_map[1] = PAGE_READONLY;
	protection_map[2] = PAGE_COPY;
	protection_map[3] = PAGE_COPY;
	protection_map[4] = PAGE_READONLY;
	protection_map[5] = PAGE_READONLY;
	protection_map[6] = PAGE_COPY;
	protection_map[7] = PAGE_COPY;
	protection_map[8] = PAGE_READONLY;
	protection_map[9] = PAGE_READONLY;
	protection_map[10] = PAGE_SHARED;
	protection_map[11] = PAGE_SHARED;
	protection_map[12] = PAGE_READONLY;
	protection_map[13] = PAGE_READONLY;
	protection_map[14] = PAGE_SHARED;
	protection_map[15] = PAGE_SHARED;
#endif

	/*
	 * Map each physical RAM chunk described by the boot information
	 * into the kernel virtual address space, drawing page-table
	 * memory from availmem.
	 */
	for (chunk = 0; chunk < boot_info.num_memory; chunk++) {
		mem_avail = map_chunk (boot_info.memory[chunk].addr,
				       boot_info.memory[chunk].size,
				       &availmem);

	}
	flush_tlb_all();
#ifdef DEBUG
	printk ("memory available is %ldKB\n", mem_avail >> 10);
#endif

	/*
	 * map_chunk() advanced availmem past the pages it used for
	 * page tables; continue allocating from there.
	 */
	start_mem = availmem;

#ifdef DEBUG
	printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
		start_mem, end_mem);
#endif

	/*
	 * Reserve one page each for __bad_pagetable(), __bad_page()
	 * and the shared zero page.
	 */
	empty_bad_page_table = start_mem;
	start_mem += PAGE_SIZE;
	empty_bad_page = start_mem;
	start_mem += PAGE_SIZE;
	empty_zero_page = start_mem;
	start_mem += PAGE_SIZE;
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

#if 0
	/* (disabled) dynamically allocated swapper page directory */
	swapper_pg_dir = (pgd_t *)get_kpointer_table();

	init_mm.pgd = swapper_pg_dir;
#endif

	/* task 0 uses the statically allocated swapper_pg_dir */
	memset (swapper_pg_dir, 0, sizeof(pgd_t)*PTRS_PER_PGD);
	task[0]->tss.pagedir_v = (unsigned long *)swapper_pg_dir;
	task[0]->tss.pagedir_p = VTOP (swapper_pg_dir);

#ifdef DEBUG
	printk ("task 0 pagedir at %p virt, %#lx phys\n",
		task[0]->tss.pagedir_v, task[0]->tss.pagedir_p);
#endif

	/* build the root-pointer descriptor and load it into the MMU */
	task[0]->tss.crp[0] = 0x80000000 | _PAGE_SHORT;
	task[0]->tss.crp[1] = task[0]->tss.pagedir_p;

	if (m68k_is040or060)
		/* '040/'060: load the root pointer via movec; the raw
		 * opcode 0x4e7b0806 appears to encode "movec %d0,%urp"
		 * -- NOTE(review): confirm against the M68040 manual */
		asm ("movel %0,%/d0\n\t"
		     ".long 0x4e7b0806"
		     :
		     : "g" (task[0]->tss.crp[1])
		     : "d0");
	else
		/* '020/'030: load the CPU root pointer with pmove */
		asm ("pmove %0@,%/crp"
		     :
		     : "a" (task[0]->tss.crp));

#ifdef DEBUG
	printk ("set crp\n");
#endif

	/* initialize the address-space selector for user accesses
	 * (set_fs semantics are machine-specific -- see asm/segment.h) */
	set_fs (USER_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif

#ifndef CONFIG_BLK_DEV_INITRD
	/*
	 * Without the initrd driver, a ramdisk image passed in by the
	 * boot loader (described by boot_info) is copied to start_mem
	 * and that memory is reserved so the ramdisk code can load it
	 * from there later.
	 */
	ramdisk_length = boot_info.ramdisk_size * 1024;
	/* only take the ramdisk if one was supplied and no root device
	 * has been chosen yet */
	if ((ramdisk_length > 0) && (ROOT_DEV == 0)) {
		char *rdp;

		rd_start = (char *) start_mem;

		/* current (virtual) location of the boot-loader image */
		rdp = (char *)mm_phys_to_virt (boot_info.ramdisk_addr);

		/* relocate the image and reserve its new home */
		memcpy (rd_start, rdp, ramdisk_length);
		start_mem += ramdisk_length;
		rd_doload = 1;
	}
#endif

	return free_area_init (start_mem, end_mem);
}
447
/*
 * mem_init() - hand boot memory over to the page allocator.
 *
 * Clears the reserved bit on every page above the kernel image,
 * re-reserves machine-specific pages on the Atari, then walks all of
 * memory: reserved pages are counted as kernel code or data, every
 * other page is released with free_page() (skipping any initrd image),
 * and the usual "Memory: ..." boot banner is printed.
 */
void mem_init(unsigned long start_mem, unsigned long end_mem)
{
	int codepages = 0;
	int datapages = 0;
	unsigned long tmp;
	extern int _etext;	/* end of kernel text (linker symbol) */

	end_mem &= PAGE_MASK;	/* usable memory ends on a page boundary */
	high_memory = end_mem;

	/* un-reserve everything above the kernel image */
	start_mem = PAGE_ALIGN(start_mem);
	while (start_mem < high_memory) {
		clear_bit(PG_reserved, &mem_map[MAP_NR(start_mem)].flags);
		start_mem += PAGE_SIZE;
	}

#ifdef CONFIG_ATARI

	if (MACH_IS_ATARI) {
		/*
		 * If physical address 0 maps to a non-zero virtual
		 * address, keep that page reserved, along with the
		 * ST-RAM range [rsvd_stram_beg, rsvd_stram_end) set
		 * aside elsewhere (presumably for hardware/driver use
		 * -- TODO confirm against the Atari ST-RAM code).
		 */
		unsigned long virt0 = PTOV( 0 ), adr;
		extern unsigned long rsvd_stram_beg, rsvd_stram_end;

		if (virt0 != 0) {

			set_bit(PG_reserved, &mem_map[MAP_NR(virt0)].flags);

			/* round the reserved range out to whole pages */
			rsvd_stram_end += PAGE_SIZE - 1;
			rsvd_stram_end &= PAGE_MASK;
			rsvd_stram_beg &= PAGE_MASK;
			for( adr = rsvd_stram_beg; adr < rsvd_stram_end; adr += PAGE_SIZE )
				set_bit(PG_reserved, &mem_map[MAP_NR(adr)].flags);
		}
	}

#endif
#ifdef DEBUG
	printk ("task[0] root table is %p\n", task[0]->tss.pagedir_v);
#endif

	for (tmp = 0 ; tmp < end_mem ; tmp += PAGE_SIZE) {
		/* pages whose physical address is beyond the machine's
		 * DMA limit are not DMA-capable */
		if (VTOP (tmp) >= mach_max_dma_address)
			clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
		if (PageReserved(mem_map+MAP_NR(tmp))) {
			/* reserved pages are attributed to code or data
			 * depending on whether they lie below _etext */
			if (tmp < (unsigned long)&_etext)
				codepages++;
			else
				datapages++;
			continue;
		}
		mem_map[MAP_NR(tmp)].count = 1;
#ifdef CONFIG_BLK_DEV_INITRD
		/* don't free pages holding the initrd image */
		if (!initrd_start || (tmp < initrd_start || tmp >= initrd_end))
#endif
			free_page(tmp);
	}
	tmp = nr_free_pages << PAGE_SHIFT;
	printk("Memory: %luk/%luk available (%dk kernel code, %dk data)\n",
	       tmp >> 10,
	       high_memory >> 10,
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10));
}
522
523 void si_meminfo(struct sysinfo *val)
524 {
525 unsigned long i;
526
527 i = high_memory >> PAGE_SHIFT;
528 val->totalram = 0;
529 val->sharedram = 0;
530 val->freeram = nr_free_pages << PAGE_SHIFT;
531 val->bufferram = buffermem;
532 while (i-- > 0) {
533 if (PageReserved(mem_map+i))
534 continue;
535 val->totalram++;
536 if (!mem_map[i].count)
537 continue;
538 val->sharedram += mem_map[i].count-1;
539 }
540 val->totalram <<= PAGE_SHIFT;
541 val->sharedram <<= PAGE_SHIFT;
542 return;
543 }