This source file includes the following definitions.
- smp_info
- swap
- smp_store_cpu_info
- smp_commence
- smp_callin
- cpu_panic
- smp_boot_cpus
- send_ipi
- smp_message_pass
- smp_cross_call
- smp_flush_cache_all
- smp_flush_tlb_all
- smp_flush_cache_mm
- smp_flush_tlb_mm
- smp_flush_cache_range
- smp_flush_tlb_range
- smp_flush_cache_page
- smp_flush_tlb_page
- smp_flush_page_to_ram
- smp_reschedule_irq
- smp_capture
- smp_release
- smp_message_irq
1
2
3
4
5
6 #include <asm/head.h>
7 #include <asm/ptrace.h>
8
9 #include <linux/kernel.h>
10 #include <linux/tasks.h>
11 #include <linux/smp.h>
12
13 #include <asm/delay.h>
14 #include <asm/irq.h>
15 #include <asm/page.h>
16 #include <asm/pgtable.h>
17 #include <asm/oplib.h>
18
/* Physical address of the SRMMU context table, handed to the PROM when
 * starting secondary CPUs; number of CPUs the PROM reported. */
extern ctxd_t *srmmu_ctx_table_phys;
extern int linux_num_cpus;

/* Per-CPU trap log entry: program counter and processor state register. */
struct tlog {
unsigned long pc;
unsigned long psr;
};

/* Ring of 256 trap log entries per CPU, with per-CPU write cursors. */
struct tlog trap_log[4][256];
unsigned long trap_log_ent[4] = { 0, 0, 0, 0, };

extern void calibrate_delay(void);

volatile unsigned long stuck_pc = 0;
/* Set once smp_boot_cpus() finishes; gates message passing. */
volatile int smp_processors_ready = 0;

int smp_found_config = 0;
/* Bitmask of CPUs that are present/online. */
unsigned long cpu_present_map = 0;
int smp_num_cpus = 1;
int smp_threads_ready=0;
/* Logical CPU number -> hardware module id (mid) translation. */
unsigned char mid_xlate[NR_CPUS] = { 0, 0, 0, 0, };
/* Per-CPU handshake flags: set by a CPU to acknowledge boot/messages. */
volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
volatile unsigned long smp_invalidate_needed[NR_CPUS] = { 0, };
volatile unsigned long smp_spinning[NR_CPUS] = { 0, };
struct cpuinfo_sparc cpu_data[NR_CPUS];
unsigned char boot_cpu_id = 0;
static int smp_activated = 0;
/* Per-CPU "message pass in progress" nesting counters and the shared
 * message mailbox (source cpu, message id, payload). */
static volatile unsigned char smp_cpu_in_msg[NR_CPUS];
static volatile unsigned long smp_msg_data;
static volatile int smp_src_cpu;
static volatile int smp_msg_id;
/* Physical <-> logical CPU number translation tables (-1 = unused). */
volatile int cpu_number_map[NR_CPUS];
volatile int cpu_logical_map[NR_CPUS];







/* The big kernel lock and its bookkeeping (pre-spinlock era SMP). */
klock_t kernel_flag = KLOCK_CLEAR;
volatile unsigned char active_kernel_processor = NO_PROC_ID;
volatile unsigned long kernel_counter = 0;
volatile unsigned long syscall_count = 0;
volatile unsigned long ipi_count;
#ifdef __SMP_PROF__
volatile unsigned long smp_spins[NR_CPUS]={0};
volatile unsigned long smp_spins_syscall[NR_CPUS]={0};
volatile unsigned long smp_spins_syscall_cur[NR_CPUS]={0};
volatile unsigned long smp_spins_sys_idle[NR_CPUS]={0};
volatile unsigned long smp_idle_count[1+NR_CPUS]={0,};
#endif
#if defined (__SMP_PROF__)
volatile unsigned long smp_idle_map=0;
#endif

volatile unsigned long smp_proc_in_lock[NR_CPUS] = {0,};
volatile int smp_process_available=0;



/* Compile-time switch for verbose SMP debugging output. */
#ifdef SMP_DEBUG
#define SMP_PRINTK(x) printk x
#else
#define SMP_PRINTK(x)
#endif

/* Set by smp_commence() on the boot CPU; secondaries spin on it. */
static volatile int smp_commenced = 0;

/* Static report buffer for smp_info() — not reentrant. */
static char smp_buf[512];
90
91 char *smp_info(void)
92 {
93 sprintf(smp_buf,
94 "\n CPU0\t\tCPU1\t\tCPU2\t\tCPU3\n"
95 "State: %s\t\t%s\t\t%s\t\t%s\n"
96 "Lock: %08lx\t\t%08lx\t%08lx\t%08lx\n"
97 "\n"
98 "klock: %x\n",
99 (cpu_present_map & 1) ? ((active_kernel_processor == 0) ? "akp" : "online") : "offline",
100 (cpu_present_map & 2) ? ((active_kernel_processor == 1) ? "akp" : "online") : "offline",
101 (cpu_present_map & 4) ? ((active_kernel_processor == 2) ? "akp" : "online") : "offline",
102 (cpu_present_map & 8) ? ((active_kernel_processor == 3) ? "akp" : "online") : "offline",
103 smp_proc_in_lock[0], smp_proc_in_lock[1], smp_proc_in_lock[2],
104 smp_proc_in_lock[3],
105 kernel_flag);
106 return smp_buf;
107 }
108
/* Atomically exchange *ptr with val using the SPARC "swap" instruction;
 * returns the previous contents of *ptr.  This is the atomic primitive
 * used for the callin-map and message-cpu handshakes below.
 */
static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
{
	__asm__ __volatile__("swap [%1], %0\n\t" :
			     "=&r" (val), "=&r" (ptr) :
			     "0" (val), "1" (ptr));
	return val;
}
116
117
118
119
120
121
/* Record this CPU's delay-loop calibration; called on each CPU after
 * calibrate_delay() has set loops_per_sec there.
 */
void smp_store_cpu_info(int id)
{
	cpu_data[id].udelay_val = loops_per_sec;
}
126
127
128
129
130
131
132
133
134
135
136
/* Release the secondary CPUs spinning in smp_callin(): flush caches/TLB,
 * publish smp_commenced = 1, then flush again so the store is visible.
 * The flush-before and flush-after ordering is deliberate.
 */
void smp_commence(void)
{


	local_flush_cache_all();
	local_flush_tlb_all();
	smp_commenced = 1;
	local_flush_cache_all();
	local_flush_tlb_all();
}
148
/* Entry point for a secondary CPU after the sparc_cpu_startup trampoline.
 * Calibrates its delay loop, announces itself in cpu_callin_map, spins
 * until the boot CPU sets smp_commenced, then widens its idle task's
 * first vma to cover kernel space before enabling interrupts for good.
 */
void smp_callin(void)
{
	int cpuid = smp_processor_id();

	sti();
	local_flush_cache_all();
	local_flush_tlb_all();
	calibrate_delay();
	smp_store_cpu_info(cpuid);
	local_flush_cache_all();
	local_flush_tlb_all();
	cli();

	/* Atomically tell the boot CPU we made it this far. */
	swap((unsigned long *)&cpu_callin_map[cpuid], 1);
	local_flush_cache_all();
	local_flush_tlb_all();
	/* Wait for smp_commence() on the boot CPU. */
	while(!smp_commenced)
		barrier();
	local_flush_cache_all();
	local_flush_tlb_all();

	/* Patch the idle task's first mapping to span the kernel image,
	 * mirroring init_task's layout. */
	current->mm->mmap->vm_page_prot = PAGE_SHARED;
	current->mm->mmap->vm_start = KERNBASE;
	current->mm->mmap->vm_end = init_task.mm->mmap->vm_end;

	local_flush_cache_all();
	local_flush_tlb_all();

	sti();
}
181
/* Called if a CPU ever falls out of its idle loop — this must not
 * happen, so report which CPU did it and panic.
 */
void cpu_panic(void)
{
	int cpu = smp_processor_id();

	printk("CPU[%d]: Returns from cpu_idle!\n", cpu);
	panic("SMP bolixed\n");
}
187
188
189
190
191
/* PROM-reported per-CPU info, and the register descriptor for the MMU
 * context table passed to prom_startcpu() when booting secondaries. */
extern struct prom_cpuinfo linux_cpus[NCPUS];
static struct linux_prom_registers penguin_ctable;
194
195 void smp_boot_cpus(void)
196 {
197 int cpucount = 0;
198 int i = 0;
199
200 printk("Entering SparclinuxMultiPenguin(SMP) Mode...\n");
201
202 penguin_ctable.which_io = 0;
203 penguin_ctable.phys_addr = (char *) srmmu_ctx_table_phys;
204 penguin_ctable.reg_size = 0;
205
206 sti();
207 cpu_present_map |= (1 << smp_processor_id());
208 cpu_present_map = 0;
209 for(i=0; i < linux_num_cpus; i++)
210 cpu_present_map |= (1<<i);
211 for(i=0; i < NR_CPUS; i++)
212 cpu_number_map[i] = -1;
213 for(i=0; i < NR_CPUS; i++)
214 cpu_logical_map[i] = -1;
215 mid_xlate[boot_cpu_id] = (linux_cpus[boot_cpu_id].mid & ~8);
216 cpu_number_map[boot_cpu_id] = 0;
217 cpu_logical_map[0] = boot_cpu_id;
218 active_kernel_processor = boot_cpu_id;
219 smp_store_cpu_info(boot_cpu_id);
220 set_irq_udt(0);
221 local_flush_cache_all();
222 if(linux_num_cpus == 1)
223 return;
224 for(i = 0; i < NR_CPUS; i++) {
225 if(i == boot_cpu_id)
226 continue;
227
228 if(cpu_present_map & (1 << i)) {
229 extern unsigned long sparc_cpu_startup;
230 unsigned long *entry = &sparc_cpu_startup;
231 int timeout;
232
233
234 entry += ((i-1) * 6);
235
236
237 printk("Starting CPU %d at %p\n", i, entry);
238 mid_xlate[i] = (linux_cpus[i].mid & ~8);
239 local_flush_cache_all();
240 prom_startcpu(linux_cpus[i].prom_node,
241 &penguin_ctable, 0, (char *)entry);
242
243
244 for(timeout = 0; timeout < 5000000; timeout++) {
245 if(cpu_callin_map[i])
246 break;
247 udelay(100);
248 }
249 if(cpu_callin_map[i]) {
250
251 cpucount++;
252 cpu_number_map[i] = i;
253 cpu_logical_map[i] = i;
254 } else {
255 printk("Penguin %d is stuck in the bottle.\n", i);
256 }
257 }
258 if(!(cpu_callin_map[i])) {
259 cpu_present_map &= ~(1 << i);
260 cpu_number_map[i] = -1;
261 }
262 }
263 local_flush_cache_all();
264 if(cpucount == 0) {
265 printk("Error: only one Penguin found.\n");
266 cpu_present_map = (1 << smp_processor_id());
267 } else {
268 unsigned long bogosum = 0;
269 for(i = 0; i < NR_CPUS; i++) {
270 if(cpu_present_map & (1 << i))
271 bogosum += cpu_data[i].udelay_val;
272 }
273 printk("Total of %d Penguins activated (%lu.%02lu PenguinMIPS).\n",
274 cpucount + 1,
275 (bogosum + 2500)/500000,
276 ((bogosum + 2500)/5000)%100);
277 smp_activated = 1;
278 smp_num_cpus = cpucount + 1;
279 }
280 smp_processors_ready = 1;
281 }
282
283 static inline void send_ipi(unsigned long target_map, int irq)
284 {
285 int i;
286
287 for(i = 0; i < 4; i++) {
288 if((1<<i) & target_map)
289 set_cpu_int(mid_xlate[i], irq);
290 }
291 }
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
static volatile int message_cpu = NO_PROC_ID; /* CPU currently passing a message, NO_PROC_ID if none. */
309
/* Send cross-CPU message `msg' (with payload `data') to `target', which
 * is a CPU number or MSG_ALL / MSG_ALL_BUT_SELF.  `wait' selects the
 * completion policy: 1 = wait for all callin acks, 2 = wait for
 * invalidate flags to clear, 3 = return immediately (caller finishes the
 * handshake itself, as smp_cross_call() does).  Uses IRQ 15 for general
 * messages and IRQ 13 for reschedule requests.
 */
void smp_message_pass(int target, int msg, unsigned long data, int wait)
{
	unsigned long target_map;
	int p = smp_processor_id();
	int irq = 15;
	int i;

	/* Nothing to talk to before bring-up completes. */
	if(!smp_processors_ready)
		return;

	/* Reschedules go on their own IRQ and are dropped (not queued)
	 * if this CPU already has a message pass in flight. */
	if(msg == MSG_RESCHEDULE) {
		irq = 13;
		if(smp_cpu_in_msg[p])
			return;
	}

	/* Only one non-trivial message pass may be in progress at a time.
	 * If we collide, report it and deliberately wedge this CPU so the
	 * state can be inspected from the console. */
	if(message_cpu != NO_PROC_ID && msg != MSG_STOP_CPU && msg != MSG_RESCHEDULE) {
		printk("CPU #%d: Message pass %d but pass in progress by %d of %d\n",
		       smp_processor_id(),msg,message_cpu, smp_msg_id);

		/* Reset enough state that the console/debugger stays usable. */
		smp_cpu_in_msg[p] = 0;
		intr_count = 0;
		smp_swap(&message_cpu, NO_PROC_ID);
		sti();
		printk("spinning, please L1-A, type ctrace and send output to davem\n");
		while(1)
			barrier();
	}
	/* Claim the message channel. */
	smp_swap(&message_cpu, smp_processor_id());

	smp_cpu_in_msg[p]++;

	/* Fill the shared mailbox (reschedules carry no payload). */
	if(msg != MSG_RESCHEDULE) {
		smp_src_cpu = p;
		smp_msg_id = msg;
		smp_msg_data = data;
	}

#if 0
	printk("SMP message pass from cpu %d to cpu %d msg %d\n", p, target, msg);
#endif

	/* Reset the ack flags, pre-acking every CPU that is NOT a target
	 * so the wait loops below only block on real recipients. */
	for(i = 0; i < smp_num_cpus; i++)
		swap((unsigned long *) &cpu_callin_map[i], 0);
	if(target == MSG_ALL_BUT_SELF) {
		target_map = (cpu_present_map & ~(1<<p));
		swap((unsigned long *) &cpu_callin_map[p], 1);
	} else if(target == MSG_ALL) {
		target_map = cpu_present_map;
	} else {
		for(i = 0; i < smp_num_cpus; i++)
			if(i != target)
				swap((unsigned long *) &cpu_callin_map[i], 1);
		target_map = (1<<target);
	}

	/* Fire the IPIs. */
	send_ipi(target_map, irq);

	switch(wait) {
	case 1:
		for(i = 0; i < smp_num_cpus; i++)
			while(!cpu_callin_map[i])
				barrier();
		break;
	case 2:
		for(i = 0; i < smp_num_cpus; i++)
			while(smp_invalidate_needed[i])
				barrier();
		break;
	case 3:
		/* Caller completes the handshake and releases the
		 * channel itself — see smp_cross_call(). */
		return;
	}
	smp_cpu_in_msg[p]--;
	smp_swap(&message_cpu, NO_PROC_ID);
}
409
/* Shared argument block for cross-CPU function calls.  The initiating
 * CPU fills func/args; each CPU flags processors_in[] on entry to the
 * call and processors_out[] on completion.
 */
struct smp_funcall {
	smpfunc_t func;
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
	unsigned long arg4;
	unsigned long arg5;
	unsigned long processors_in[NR_CPUS];  /* Set when ith cpu enters the call. */
	unsigned long processors_out[NR_CPUS]; /* Set when ith cpu finishes the call. */
} ccall_info;




/* Spin budget (barrier() iterations) when waiting for the other CPUs. */
#define CCALL_TIMEOUT 5000000
426
427
428
429
430
431
432
433
434
435
/* Run func(arg1..arg5) on every CPU.  Only the active kernel processor
 * may initiate; before bring-up the function is simply run locally.
 * Protocol: publish the call in ccall_info, send MSG_CROSS_CALL with
 * wait==3 (we complete the handshake ourselves), wait for all CPUs to
 * enter, run the function locally, wait for all CPUs to finish, then
 * release the message channel claimed by smp_message_pass().
 */
void smp_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
		    unsigned long arg3, unsigned long arg4, unsigned long arg5)
{
	unsigned long me = smp_processor_id();
	unsigned long flags;
	int i, timeout;

#ifdef DEBUG_CCALL
	printk("xc%d<", me);
#endif
	if(smp_processors_ready) {
		save_flags(flags); cli();
		if(me != active_kernel_processor)
			goto cross_call_not_master;

		/* Publish the call for the other CPUs. */
		ccall_info.func = func;
		ccall_info.arg1 = arg1;
		ccall_info.arg2 = arg2;
		ccall_info.arg3 = arg3;
		ccall_info.arg4 = arg4;
		ccall_info.arg5 = arg5;

		/* Reset the in/out flags; pre-mark ourselves done. */
		for(i = 0; i < smp_num_cpus; i++) {
			ccall_info.processors_in[i] = 0;
			ccall_info.processors_out[i] = 0;
		}
		ccall_info.processors_in[me] = 1;
		ccall_info.processors_out[me] = 1;

		/* wait==3: smp_message_pass leaves the channel to us. */
		smp_message_pass(MSG_ALL_BUT_SELF, MSG_CROSS_CALL, 0, 3);

		/* Wait for everyone to enter the call; the timeout budget
		 * is shared across all CPUs in the loop. */
		timeout = CCALL_TIMEOUT;
		for(i = 0; i < smp_num_cpus; i++) {
			while(!ccall_info.processors_in[i] && timeout-- > 0)
				barrier();
			if(!ccall_info.processors_in[i])
				goto procs_time_out;
		}
#ifdef DEBUG_CCALL
		printk("I");
#endif

		/* Run the call locally too. */
		func(arg1, arg2, arg3, arg4, arg5);

		/* Wait for everyone to finish. */
		timeout = CCALL_TIMEOUT;
		for(i = 0; i < smp_num_cpus; i++) {
			while(!ccall_info.processors_out[i] && timeout-- > 0)
				barrier();
			if(!ccall_info.processors_out[i])
				goto procs_time_out;
		}
#ifdef DEBUG_CCALL
		printk("O>");
#endif

		/* Release the channel smp_message_pass() left claimed. */
		smp_cpu_in_msg[me]--;
		smp_swap(&message_cpu, NO_PROC_ID);
		restore_flags(flags);
		return;

	procs_time_out:
		printk("smp: Wheee, penguin drops off the bus\n");
		smp_cpu_in_msg[me]--;
		message_cpu = NO_PROC_ID;
		restore_flags(flags);
		return;
	}

	/* Uniprocessor / pre-SMP: just run it here. */
	func(arg1, arg2, arg3, arg4, arg5);
	return;

cross_call_not_master:
	printk("Cross call initiated by non master cpu\n");
	printk("akp=%x me=%08lx\n", active_kernel_processor, me);
	restore_flags(flags);
	panic("penguin cross call");
}
522
523 void smp_flush_cache_all(void)
524 { xc0((smpfunc_t) local_flush_cache_all); }
525
526 void smp_flush_tlb_all(void)
527 { xc0((smpfunc_t) local_flush_tlb_all); }
528
529 void smp_flush_cache_mm(struct mm_struct *mm)
530 {
531 if(mm->context != NO_CONTEXT)
532 xc1((smpfunc_t) local_flush_cache_mm, (unsigned long) mm);
533 }
534
535 void smp_flush_tlb_mm(struct mm_struct *mm)
536 {
537 if(mm->context != NO_CONTEXT)
538 xc1((smpfunc_t) local_flush_tlb_mm, (unsigned long) mm);
539 }
540
541 void smp_flush_cache_range(struct mm_struct *mm, unsigned long start,
542 unsigned long end)
543 {
544 if(mm->context != NO_CONTEXT)
545 xc3((smpfunc_t) local_flush_cache_range, (unsigned long) mm,
546 start, end);
547 }
548
549 void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
550 unsigned long end)
551 {
552 if(mm->context != NO_CONTEXT)
553 xc3((smpfunc_t) local_flush_tlb_range, (unsigned long) mm,
554 start, end);
555 }
556
557 void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
558 { xc2((smpfunc_t) local_flush_cache_page, (unsigned long) vma, page); }
559
560 void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
561 { xc2((smpfunc_t) local_flush_tlb_page, (unsigned long) vma, page); }
562
563 void smp_flush_page_to_ram(unsigned long page)
564 { xc1((smpfunc_t) local_flush_page_to_ram, page); }
565
566
567 void smp_reschedule_irq(void)
568 {
569 if(smp_processor_id() != active_kernel_processor)
570 panic("SMP Reschedule on CPU #%d, but #%d is active.\n",
571 smp_processor_id(), active_kernel_processor);
572
573 need_resched=1;
574 }
575
576
577
578
579
static volatile unsigned long release = 1; /* 0 while captured CPUs must stay parked. */
static volatile int capture_level = 0;     /* Nesting depth of smp_capture() calls. */
582
583 void smp_capture(void)
584 {
585 unsigned long flags;
586
587 if(!smp_activated || !smp_commenced)
588 return;
589 #ifdef DEBUG_CAPTURE
590 printk("C<%d>", smp_processor_id());
591 #endif
592 save_flags(flags); cli();
593 if(!capture_level) {
594 release = 0;
595 smp_message_pass(MSG_ALL_BUT_SELF, MSG_CAPTURE, 0, 1);
596 }
597 capture_level++;
598 restore_flags(flags);
599 }
600
601 void smp_release(void)
602 {
603 unsigned long flags;
604 int i;
605
606 if(!smp_activated || !smp_commenced)
607 return;
608 #ifdef DEBUG_CAPTURE
609 printk("R<%d>", smp_processor_id());
610 #endif
611 save_flags(flags); cli();
612 if(!(capture_level - 1)) {
613 release = 1;
614 for(i = 0; i < smp_num_cpus; i++)
615 while(cpu_callin_map[i])
616 barrier();
617 }
618 capture_level -= 1;
619 restore_flags(flags);
620 }
621
622
623
624
625
626
627
628
/* IRQ 15 handler: dispatch a cross-CPU message from the shared mailbox
 * (smp_msg_id/smp_msg_data filled by smp_message_pass()).
 */
void smp_message_irq(void)
{
	int i=smp_processor_id();

	switch(smp_msg_id) {
	case MSG_CROSS_CALL:
		/* Flag entry, run the published function, flag completion. */
		ccall_info.processors_in[i] = 1;
		ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
				ccall_info.arg4, ccall_info.arg5);
		ccall_info.processors_out[i] = 1;
		break;

	case MSG_STOP_CPU:
		/* Park this CPU forever (the spin makes the fall-through
		 * into default unreachable). */
		sti();
		while(1)
			barrier();

	default:
		printk("CPU #%d sent invalid cross CPU message to CPU #%d: %X(%lX).\n",
		       smp_src_cpu,smp_processor_id(),smp_msg_id,smp_msg_data);
		break;
	}
}