This source file includes the following definitions.
- prom_probe_memory
- probe_memory
- sparc_lvl15_nmi
- do_sparc_fault
- force_user_fault
- window_overflow_fault
- window_underflow_fault
- window_ret_fault
1
2
3
4
5
6
7 #include <asm/head.h>
8
9 #include <linux/string.h>
10 #include <linux/types.h>
11 #include <linux/ptrace.h>
12 #include <linux/mman.h>
13 #include <linux/tasks.h>
14 #include <linux/smp.h>
15 #include <linux/signal.h>
16 #include <linux/mm.h>
17
18 #include <asm/system.h>
19 #include <asm/segment.h>
20 #include <asm/openprom.h>
21 #include <asm/idprom.h>
22 #include <asm/page.h>
23 #include <asm/pgtable.h>
24 #include <asm/memreg.h>
25 #include <asm/openprom.h>
26 #include <asm/oplib.h>
27 #include <asm/smp.h>
28 #include <asm/traps.h>
29 #include <asm/kdebug.h>
30
31 #define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))
32
33 extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
34 extern int prom_node_root;
35
36 extern void die_if_kernel(char *,struct pt_regs *);
37
38 struct linux_romvec *romvec;
39
40
41
42
43
44 int num_segmaps, num_contexts;
45 int invalid_segment;
46
47
48
49 int vac_size, vac_linesize, vac_do_hw_vac_flushes;
50 int vac_entries_per_context, vac_entries_per_segment;
51 int vac_entries_per_page;
52
53
54 int prom_probe_memory (void)
55 {
56 register struct linux_mlist_v0 *mlist;
57 register unsigned long bytes, base_paddr, tally;
58 register int i;
59
60 i = 0;
61 mlist= *prom_meminfo()->v0_available;
62 bytes = tally = mlist->num_bytes;
63 base_paddr = (unsigned long) mlist->start_adr;
64
65 sp_banks[0].base_addr = base_paddr;
66 sp_banks[0].num_bytes = bytes;
67
68 while (mlist->theres_more != (void *) 0){
69 i++;
70 mlist = mlist->theres_more;
71 bytes = mlist->num_bytes;
72 tally += bytes;
73 if (i >= SPARC_PHYS_BANKS-1) {
74 printk ("The machine has more banks that this kernel can support\n"
75 "Increase the SPARC_PHYS_BANKS setting (currently %d)\n",
76 SPARC_PHYS_BANKS);
77 i = SPARC_PHYS_BANKS-1;
78 break;
79 }
80
81 sp_banks[i].base_addr = (unsigned long) mlist->start_adr;
82 sp_banks[i].num_bytes = mlist->num_bytes;
83 }
84
85 i++;
86 sp_banks[i].base_addr = 0xdeadbeef;
87 sp_banks[i].num_bytes = 0;
88
89
90
91
92 for(i=0; sp_banks[i].num_bytes != 0; i++)
93 sp_banks[i].num_bytes &= PAGE_MASK;
94
95 return tally;
96 }
97
98
99
100
/* Top-level memory probe: delegate to the PROM helper and hand the
 * total byte count back to the caller.
 */
unsigned long
probe_memory(void)
{
	return prom_probe_memory();
}
111
112 extern void sun4c_complete_all_stores(void);
113
114
/* Level 15 (non-maskable) interrupt: fatal hardware error.  Dump the
 * synchronous/asynchronous error registers and fault addresses plus a
 * full register dump, then drop back into the PROM.  Never returns.
 *
 * regs:   trap-time registers
 * serr/svaddr:  synchronous error status and faulting vaddr
 * aerr/avaddr:  asynchronous error status and faulting vaddr
 */
asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
unsigned long svaddr, unsigned long aerr,
unsigned long avaddr)
{
/* NOTE(review): presumably flushes pending sun4c store-buffer writes
 * so the error state below is stable — confirm against sun4c code. */
sun4c_complete_all_stores();
printk("FAULT: NMI received\n");
printk("SREGS: Synchronous Error %08lx\n", serr);
printk(" Synchronous Vaddr %08lx\n", svaddr);
printk(" Asynchronous Error %08lx\n", aerr);
printk(" Asynchronous Vaddr %08lx\n", avaddr);
printk("REGISTER DUMP:\n");
show_regs(regs);
/* No recovery possible: hand control back to the boot PROM. */
prom_halt();
}
129
/* Main page-fault entry point, called from the trap handler.
 *
 * regs:       trap-time registers
 * text_fault: non-zero for an instruction-fetch fault; the fault
 *             address is then taken from the PC instead of `address'
 * write:      non-zero if the faulting access was a store
 * address:    faulting virtual address reported by the hardware
 *
 * User faults on unmapped/protected addresses get SIGSEGV; kernel
 * faults either take the quick fault path (addresses above KERNBASE)
 * or oops via die_if_kernel().
 */
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
unsigned long address)
{
struct vm_area_struct *vma;
/* PSR_PS set means we trapped from supervisor (kernel) mode. */
int from_user = !(regs->psr & PSR_PS);

#if 0
printk("CPU[%d]: f<pid=%d,tf=%d,wr=%d,addr=%08lx",
smp_processor_id(), current->pid, text_fault,
write, address);
printk(",pc=%08lx> ", regs->pc);
#endif

if(text_fault)
address = regs->pc;

/* Kernel-mode fault in the kernel's own address range: no user VMA
 * can be involved, so skip the VMA walk entirely.  On SMP there is
 * no quick path — report and spin; on UP take the quick fault path.
 */
if(!from_user && address >= KERNBASE) {
#ifdef __SMP__
printk("CPU[%d]: Kernel faults at addr=%08lx\n",
smp_processor_id(), address);
while(1)
;
#else
quick_kernel_fault(address);
return;
#endif
}

vma = find_vma(current, address);
if(!vma)
goto bad_area;
if(vma->vm_start <= address)
goto good_area;
/* Address is below the nearest VMA: only valid if that VMA is a
 * stack that can grow down to cover it. */
if(!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if(expand_stack(vma, address))
goto bad_area;

/* A valid VMA covers the address: now check access permissions. */
good_area:
if(write) {
if(!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
/* Reads and instruction fetches need READ or EXEC. */
if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
handle_mm_fault(vma, address, write);
return;

/* Something went wrong: either no mapping or a protection violation. */
bad_area:
if(from_user) {
#if 0
printk("%s [%d]: segfaults at %08lx pc=%08lx\n",
current->comm, current->pid, address, regs->pc);
#endif
/* Record fault details for the signal handler, then SIGSEGV. */
current->tss.sig_address = address;
current->tss.sig_desc = SUBSIG_NOMAPPING;
send_sig(SIGSEGV, current, 1);
return;
}
/* Kernel-mode bad access: print an oops-style report and die. */
if((unsigned long) address < PAGE_SIZE) {
printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
} else
printk(KERN_ALERT "Unable to handle kernel paging request");
printk(" at virtual address %08lx\n",address);
printk(KERN_ALERT "current->mm->context = %08lx\n",
(unsigned long) current->mm->context);
printk(KERN_ALERT "current->mm->pgd = %08lx\n",
(unsigned long) current->mm->pgd);
die_if_kernel("Oops", regs);
}
213
214
215 inline void force_user_fault(unsigned long address, int write)
216 {
217 struct vm_area_struct *vma;
218
219 vma = find_vma(current, address);
220 if(!vma)
221 goto bad_area;
222 if(vma->vm_start <= address)
223 goto good_area;
224 if(!(vma->vm_flags & VM_GROWSDOWN))
225 goto bad_area;
226 if(expand_stack(vma, address))
227 goto bad_area;
228 good_area:
229 if(write)
230 if(!(vma->vm_flags & VM_WRITE))
231 goto bad_area;
232 else
233 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
234 goto bad_area;
235 handle_mm_fault(vma, address, write);
236 return;
237 bad_area:
238 current->tss.sig_address = address;
239 current->tss.sig_desc = SUBSIG_NOMAPPING;
240 send_sig(SIGSEGV, current, 1);
241 return;
242 }
243
244 void window_overflow_fault(void)
245 {
246 unsigned long sp = current->tss.rwbuf_stkptrs[0];
247
248 if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
249 force_user_fault(sp + 0x38, 1);
250 force_user_fault(sp, 1);
251 }
252
253 void window_underflow_fault(unsigned long sp)
254 {
255 if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
256 force_user_fault(sp + 0x38, 0);
257 force_user_fault(sp, 0);
258 }
259
260 void window_ret_fault(struct pt_regs *regs)
261 {
262 unsigned long sp = regs->u_regs[UREG_FP];
263
264 if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
265 force_user_fault(sp + 0x38, 0);
266 force_user_fault(sp, 0);
267 }