/*
 * Intel MP v1.1 specification support routines for multi-pentium
 * hosts.
 *
 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
 * Supported by Caldera http://www.caldera.com.
 * Much of the core SMP work is based on previous work by Thomas Radke, to
 * whom a great many thanks are extended.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 *
 * Fixes
 *	Felix Koop	: NR_CPUS used properly
 *	Jose Renau	: Handle single CPU case.
 *	Alan Cox	: By repeated request 8) - Total BogoMIP report.
 *
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <asm/i82489.h>
#include <linux/smp.h>
#include <asm/pgtable.h>
#include <asm/bitops.h>
#include <asm/smp.h>

extern void *vremap(unsigned long offset, unsigned long size); /* Linus hasn't put this in the headers yet */

static int smp_found_config=0; /* Have we found an SMP box? */

unsigned long cpu_present_map = 0; /* Bitmask of existing CPUs */
int smp_num_cpus; /* Total count of live CPUs */
int smp_threads_ready=0; /* Set when the idlers are all forked */
volatile unsigned long cpu_callin_map[NR_CPUS] = {0,}; /* Only entry 0 is used now; the rest are ready for parallel delivery */
volatile unsigned long smp_invalidate_needed; /* Invalidate map, also checked in the spinlock */
struct cpuinfo_x86 cpu_data[NR_CPUS]; /* Per-CPU bogomips and other parameters */
static unsigned int num_processors = 0; /* Internal processor count */
static unsigned long io_apic_addr = 0; /* Address of the I/O APIC (not yet used) */
unsigned char boot_cpu_id = 0; /* Processor that is doing the boot up */
static unsigned char *kstack_base,*kstack_end; /* Kernel stack list pointers */
static int smp_activated = 0; /* Tripped once we need to start cross invalidating */
static volatile int smp_commenced=0; /* Tripped when we start scheduling */
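
/*
 * Note on the next two lines: until vremap() maps the real local APIC,
 * apic_read(APIC_ID) must still return something sane. APIC_ID lives at
 * offset 0x20, so pointing apic_reg 0x20 bytes below nlong makes such
 * reads hit nlong, which carries the boot CPU's id in bits 24-31 where
 * GET_APIC_ID() expects it.
 */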
static unsigned char nlong=0; /* Apparent value for boot CPU */
unsigned char *apic_reg=((unsigned char *)&nlong)-0x20; /* Later set to the vremap() of the APIC */
unsigned long apic_retval; /* Just debugging the assembler.. */
unsigned char *kernel_stacks[NR_CPUS]; /* Kernel stack pointers for CPUs (debugging) */

static volatile unsigned char smp_cpu_in_msg[NR_CPUS]; /* True if this processor is sending an IPI */
static volatile unsigned long smp_msg_data; /* IPI data pointer */
static volatile int smp_src_cpu; /* IPI sender processor */
static volatile int smp_msg_id; /* Message being sent */

volatile unsigned long kernel_flag=0; /* Kernel spinlock */
volatile unsigned char active_kernel_processor = NO_PROC_ID; /* Processor holding kernel spinlock */
volatile unsigned long kernel_counter=0; /* Number of times the processor holds the lock */
volatile unsigned long syscall_count=0; /* Number of times the processor holds the syscall lock */
volatile unsigned long smp_spins=0; /* Count of cycles wasted to spinning */

volatile unsigned long ipi_count; /* Number of IPIs delivered */
/*
 * Checksum an MP configuration block.
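 * The MP spec makes all the bytes of a block sum to zero (mod 256),
 * so a nonzero result here means the block is corrupt.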
 */

static int mpf_checksum(unsigned char *mp, int len)
{
	int sum=0;
	while(len--)
		sum+=*mp++;
	return sum&0xFF;
}

/*
 * Processor encoding in an MP configuration block
 */

static char *mpc_family(int family,int model)
{
	static char n[32];
	static char *model_defs[]=
	{
		"80486DX","80486DX",
		"80486SX","80486DX/2 or 80487",
		"80486SL","Intel5X2(tm)",
		"Unknown","Unknown",
		"80486DX/4"
	};
	if(family==0x5)
		return("Pentium(tm)");
	if(family==0x0F && model==0x0F)
		return("Special controller");
	if(family==0x04 && model<9)
		return model_defs[model];
	sprintf(n,"Unknown CPU [%d:%d]",family, model);
	return n;
}

/*
 * Read an MP configuration table.
 */

static int smp_read_mpc(struct mp_config_table *mpc)
{
	char str[16];
	int count=sizeof(*mpc);
	int apics=0;
	unsigned char *mpt=((unsigned char *)mpc)+count;

	if(memcmp(mpc->mpc_signature,MPC_SIGNATURE,4))
	{
		printk("Bad signature [%c%c%c%c].\n",
			mpc->mpc_signature[0],
			mpc->mpc_signature[1],
			mpc->mpc_signature[2],
			mpc->mpc_signature[3]);
		return 1;
	}
	if(mpf_checksum((unsigned char *)mpc,mpc->mpc_length))
	{
		printk("Checksum error.\n");
		return 1;
	}
	if(mpc->mpc_spec!=0x01)
	{
		printk("Unsupported version (%d)\n",mpc->mpc_spec);
		return 1;
	}
	memcpy(str,mpc->mpc_oem,8);
	str[8]=0;
	printk("OEM ID: %s ",str);
	memcpy(str,mpc->mpc_productid,12);
	str[12]=0;
	printk("Product ID: %s ",str);
	printk("APIC at: 0x%lX\n",mpc->mpc_lapic);

	/*
	 * Now process the configuration blocks.
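	 * Each record begins with a type byte; mpt walks the records that
	 * follow the fixed header until mpc_length bytes are consumed.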
	 */

	while(count<mpc->mpc_length)
	{
		switch(*mpt)
		{
			case MP_PROCESSOR:
			{
				struct mpc_config_processor *m=
					(struct mpc_config_processor *)mpt;
				if(m->mpc_cpuflag&CPU_ENABLED)
				{
					printk("Processor #%d %s APIC version %d\n",
						m->mpc_apicid,
						mpc_family((m->mpc_cpufeature&
							CPU_FAMILY_MASK)>>8,
							(m->mpc_cpufeature&
							CPU_MODEL_MASK)>>4),
						m->mpc_apicver);
					if(m->mpc_featureflag&(1<<0))
						printk(" Floating point unit present.\n");
					if(m->mpc_featureflag&(1<<7))
						printk(" Machine Check Exception supported.\n");
					if(m->mpc_featureflag&(1<<8))
						printk(" 64 bit compare & exchange supported.\n");
					if(m->mpc_featureflag&(1<<9))
						printk(" Internal APIC present.\n");
					if(m->mpc_cpuflag&CPU_BOOTPROCESSOR)
					{
						printk(" Bootup CPU\n");
						boot_cpu_id=m->mpc_apicid;
						nlong = boot_cpu_id<<24; /* Dummy 'self' for bootup */
					}
					else /* Boot CPU already counted */
						num_processors++;

					if(m->mpc_apicid>=NR_CPUS)
						printk("Processor #%d unused. (Max %d processors).\n",m->mpc_apicid, NR_CPUS);
					else
						cpu_present_map|=(1<<m->mpc_apicid);
				}
				mpt+=sizeof(*m);
				count+=sizeof(*m);
				break;
			}
			case MP_BUS:
			{
				struct mpc_config_bus *m=
					(struct mpc_config_bus *)mpt;
				memcpy(str,m->mpc_bustype,6);
				str[6]=0;
				printk("Bus #%d is %s\n",
					m->mpc_busid,
					str);
				mpt+=sizeof(*m);
				count+=sizeof(*m);
				break;
			}
			case MP_IOAPIC:
			{
				struct mpc_config_ioapic *m=
					(struct mpc_config_ioapic *)mpt;
				if(m->mpc_flags&MPC_APIC_USABLE)
				{
					apics++;
					printk("I/O APIC #%d Version %d at 0x%lX.\n",
						m->mpc_apicid,m->mpc_apicver,
						m->mpc_apicaddr);
					io_apic_addr = m->mpc_apicaddr;
				}
				mpt+=sizeof(*m);
				count+=sizeof(*m);
				break;
			}
			case MP_INTSRC:
			{
				struct mpc_config_intsrc *m=
					(struct mpc_config_intsrc *)mpt;

				mpt+=sizeof(*m);
				count+=sizeof(*m);
				break;
			}
			case MP_LINTSRC:
			{
				struct mpc_config_intlocal *m=
					(struct mpc_config_intlocal *)mpt;
				mpt+=sizeof(*m);
				count+=sizeof(*m);
				break;
			}
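			/*
			 * Types 0-4 above are all that MP spec v1.1 defines; an
			 * unknown type would wedge this loop, since mpt would
			 * never advance.
			 */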
		}
	}
	if(apics>1)
		printk("Warning: Multiple APICs not supported.\n");
	return num_processors;
}

/*
 * Scan the memory blocks for an SMP configuration block.
 */

void smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned long *bp=(unsigned long *)base;
	struct intel_mp_floating *mpf;
	num_processors = 1; /* The boot processor */

	/* printk("Scan SMP from %p for %ld bytes.\n",
		bp,length);*/
	if(sizeof(*mpf)!=16)
		printk("Error: MPF size\n");

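	/*
	 * The floating pointer structure is 16 bytes long and 16-byte
	 * aligned, hence the 16-byte stride (bp is an unsigned long *,
	 * so bp+=4 advances 16 bytes).
	 */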
	while(length>0)
	{
		if(*bp==SMP_MAGIC_IDENT)
		{
			mpf=(struct intel_mp_floating *)bp;
			if(mpf->mpf_length==1 &&
				!mpf_checksum((unsigned char *)bp,16) &&
				mpf->mpf_specification==1)
			{
				printk("Intel multiprocessing (MPv1.1) available.\n");
				if(mpf->mpf_feature2&(1<<7))
					printk(" IMCR and PIC mode supported.\n");
				smp_found_config=1;
				/*
				 * Now see if we need to read further.
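				 * A nonzero feature1 byte selects one of the MP
				 * spec's default two-processor configurations
				 * rather than a full configuration table.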
				 */
				if(mpf->mpf_feature1!=0)
				{
					num_processors=2;
					printk("I/O APIC at 0xFEC00000.\n");
					printk("Bus #0 is ");
				}
				switch(mpf->mpf_feature1)
				{
					case 1:
						printk("ISA");
						break;
					case 2:
						printk("EISA with no IRQ8 chaining");
						break;
					case 3:
						printk("EISA");
						break;
					case 4:
						printk("MCA");
						break;
					case 5:
						printk("ISA\nBus #1 is PCI");
						break;
					case 6:
						printk("EISA\nBus #1 is PCI");
						break;
					case 7:
						printk("MCA\nBus #1 is PCI");
						break;
					case 0:
						break;
					default:
						printk("???\nUnknown standard configuration %d\n",
							mpf->mpf_feature1);
						return;
				}
				/*
				 * Read the physical hardware table. If there isn't one
				 * the processors present are 0 and 1.
				 */
				if(mpf->mpf_physptr)
					smp_read_mpc((void *)mpf->mpf_physptr);
				else
					cpu_present_map=3;
				printk("Processors: %d\n", num_processors);
			}
		}
		bp+=4;
		length-=16;
	}
}

/*
 * Trampoline 80x86 program as an array.
 */

static unsigned char trampoline_data[]={
#include "trampoline.hex"
};

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */

static void install_trampoline(unsigned char *mp)
{
	memcpy(mp,trampoline_data,sizeof(trampoline_data));
}

/*
 * We are called very early to get the low memory for the trampoline/kernel stacks.
 * This has to be done by mm/init.c to parcel us out nice low memory. We allocate
 * the kernel stacks at 4K, 8K, 12K... currently (0x000-0x3FF is reserved for SMM
 * and other things).
 */

unsigned long smp_alloc_memory(unsigned long mem_base)
{
	int size=(num_processors-1)*PAGE_SIZE; /* Number of stacks needed */
	/*
	 * Our stacks have to be below the 1Mb line, and mem_base on entry
	 * is 4K aligned.
	 */

	if(mem_base+size>=0x9F000)
		panic("smp_alloc_memory: Insufficient low memory for kernel stacks.\n");
	kstack_base=(void *)mem_base;
	mem_base+=size;
	kstack_end=(void *)mem_base;
	return mem_base;
}

/*
 * Hand out stacks one at a time.
 */

static void *get_kernel_stack(void)
{
	void *stack=kstack_base;
	if(kstack_base>=kstack_end)
		return NULL;
	kstack_base+=PAGE_SIZE;
	return stack;
}


/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */

void smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c=&cpu_data[id];
	c->hard_math=hard_math; /* Always assumed same currently */
	c->x86=x86;
	c->x86_model=x86_model;
	c->x86_mask=x86_mask;
	c->x86_capability=x86_capability;
	c->fdiv_bug=fdiv_bug;
	c->wp_works_ok=wp_works_ok; /* Always assumed the same currently */
	c->hlt_works_ok=hlt_works_ok;
	c->udelay_val=loops_per_sec;
	strcpy(c->x86_vendor_id, x86_vendor_id);
}
402 /*
403 * Architecture specific routine called by the kernel just before init is
404 * fired off. This allows the BP to have everything in order [we hope].
405 * At the end of this all the AP's will hit the system scheduling and off
406 * we go. Each AP will load the system gdt's and jump through the kernel
407 * init into idle(). At this point the scheduler will one day take over
408 * and give them jobs to do. smp_callin is a standard routine
409 * we use to track CPU's as they power up.
410 */
411
412 void smp_commence(void)
{
	/*
	 * Lets the callins below out of their loops.
	 */
	smp_commenced=1;
}

void smp_callin(void)
{
	int cpuid=GET_APIC_ID(apic_read(APIC_ID));
	unsigned long l;
	/*
	 * Activate our APIC
	 */

	/* printk("CALLIN %d\n",smp_processor_id());*/
	l=apic_read(APIC_SPIV);
	l|=(1<<8); /* Enable */
	apic_write(APIC_SPIV,l);
	sti();
	/*
	 * Get our bogomips.
	 */
	calibrate_delay();
	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);
	/*
	 * Allow the master to continue.
	 */
	set_bit(cpuid, &cpu_callin_map[0]);
	/*
	 * Until we are ready for SMP scheduling
	 */
	load_ldt(0);
	/* printk("Testing faulting...\n");
	*(long *)0=1; OOPS... */
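	/*
	 * Flush the TLB on both sides of the wait: the boot processor may
	 * well change the page tables while we spin here.
	 */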
	local_invalidate();
	while(!smp_commenced);
	local_invalidate();
	/* printk("Commenced..\n");*/

	/* This assumes the processor IDs are consecutive 0..n-1 - FIXME */
	load_TR(cpuid);
	/* while(1);*/
}

/*
 * Cycle through the processors sending Pentium IPIs to boot each.
 */

void smp_boot_cpus(void)
{
	int i=0;
	int cpucount=0;
	void *stack;
	extern unsigned long init_user_stack[];

	/*
	 * Map the local APIC into kernel space
	 */

	apic_reg = vremap(0xFEE00000,4096);


	if(apic_reg == NULL)
		panic("Unable to map local apic.\n");

	/*
	 * Now scan the cpu present map and fire up anything we find.
	 */


	kernel_stacks[boot_cpu_id]=(void *)init_user_stack; /* Set up for boot processor first */

	smp_store_cpu_info(boot_cpu_id); /* Final full version of the data */

	active_kernel_processor=boot_cpu_id;

	for(i=0;i<NR_CPUS;i++)
	{
		if((cpu_present_map&(1<<i)) && i!=boot_cpu_id) /* Rebooting yourself is a bad move */
		{
			unsigned long cfg;
			int timeout;

			/*
			 * We need a kernel stack for each processor.
			 */

			stack=get_kernel_stack(); /* We allocated these earlier */
			if(stack==NULL)
				panic("No memory for processor stacks.\n");
			kernel_stacks[i]=stack;
			install_trampoline(stack);

			printk("Booting processor %d stack %p: ",i,stack); /* So we can see what's up */

			/*
			 * Enable the local APIC
			 */

			cfg=apic_read(APIC_SPIV);
			cfg|=(1<<8); /* Enable APIC */
			apic_write(APIC_SPIV,cfg);

			/*
			 * This gunge sends an IPI (Inter Processor Interrupt) to the
			 * processor we wish to wake. When the startup IPI is received
			 * the target CPU does a real mode jump to the stack base.
			 */

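			/*
			 * The vector field of a STARTUP IPI holds the physical page
			 * number the target starts executing at (000VV000h), which
			 * is why the trampoline/stack pages must be page aligned
			 * and below 1Mb.
			 */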
			cfg=apic_read(APIC_ICR2);
			cfg&=0x00FFFFFF;
			apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i)); /* Target chip */
			cfg=apic_read(APIC_ICR);
			cfg&=~0xFDFFF; /* Clear bits */
			cfg|=APIC_DEST_FIELD|APIC_DEST_DM_STARTUP|(((unsigned long)stack)>>12); /* Boot on the stack */
			apic_write(APIC_ICR, cfg); /* Kick the second */
			udelay(10); /* Masses of time */
			cfg=apic_read(APIC_ESR);
			if(cfg&4) /* Send accept error */
				printk("Processor refused startup request.\n");
			else
			{
				for(timeout=0;timeout<50000;timeout++)
				{
					if(cpu_callin_map[0]&(1<<i))
						break; /* It has booted */
					udelay(100); /* Wait 5s total for a response */
				}
				if(cpu_callin_map[0]&(1<<i))
					cpucount++;
				else
				{
					/*
					 * At this point we should set up a BIOS warm start and try
					 * a RESTART IPI. The 486+82489 MP pair don't support STARTUP IPIs.
					 */
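					/*
					 * A 0xA5 at physical 8K appears to be the marker the
					 * trampoline leaves as soon as it runs, so finding it
					 * means the CPU took the IPI but never called in.
					 */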
					if(*((unsigned char *)8192)==0xA5)
						printk("Stuck ??\n");
					else
						printk("Not responding.\n");
					cpu_present_map&=~(1<<i);
				}
			}
		}
	}
	/*
	 * Allow the user to impress friends.
	 */
	if(cpucount==0)
	{
		printk("Error: only one processor found.\n");
		cpu_present_map=(1<<smp_processor_id());
	}
	else
	{
		unsigned long bogosum=0;
		for(i=0;i<32;i++)
		{
			if(cpu_present_map&(1<<i))
				bogosum+=cpu_data[i].udelay_val;
		}
		printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
			cpucount+1,
			(bogosum+2500)/500000,
			((bogosum+2500)/5000)%100);
		smp_activated=1;
		smp_num_cpus=cpucount+1;
	}
}


/*
 * A non wait message cannot pass data or CPU source info. This current setup
 * is only safe because the kernel lock holder is the only one who can send a message.
 *
 * Wrapping this whole block in a spinlock is not the safe answer either. A processor may
 * get stuck with IRQs off waiting to send a message and thus not reply to the processor
 * spinning for a reply.
 *
 * In the end invalidate ought to be the NMI and a very very short function (to avoid the old
 * IDE disk problems), and other messages sent with IRQs enabled in a civilised fashion. That
 * will also boost performance.
 */

void smp_message_pass(int target, int msg, unsigned long data, int wait)
{
	unsigned long cfg;
	unsigned long target_map;
	int p=smp_processor_id();
	int irq=0x2d; /* IRQ 13 */
	int ct=0;
	static volatile int message_cpu = NO_PROC_ID;

	/*
	 * During boot up send no messages
	 */

	if(!smp_activated)
		return;


	/*
	 * Skip the reschedule if we are waiting to clear a
	 * message at this time. The reschedule cannot wait
	 * but is not critical.
	 */

	if(msg==MSG_RESCHEDULE) /* Reschedules we do via trap 0x30 */
	{
		irq=0x30;
		if(smp_cpu_in_msg[p])
			return;
	}

	/*
	 * Sanity check we don't re-enter this across CPUs. Only the kernel
	 * lock holder may send messages. For a STOP_CPU we are bringing the
	 * entire box to the fastest halt we can..
	 */

	if(message_cpu!=NO_PROC_ID && msg!=MSG_STOP_CPU)
	{
		panic("CPU #%d: Message pass %d but pass in progress by %d of %d\n",
			smp_processor_id(),msg,message_cpu, smp_msg_id);
	}
	message_cpu=smp_processor_id();


	/*
	 * We are busy
	 */

	smp_cpu_in_msg[p]++;

	/*
	 * Reschedule is currently special
	 */

	if(msg!=MSG_RESCHEDULE)
	{
		smp_src_cpu=p;
		smp_msg_id=msg;
		smp_msg_data=data;
	}

	/* printk("SMP message pass #%d to %d of %d\n",
		p, msg, target);*/

	/*
	 * Wait for the APIC to become ready - this should never occur. It's
	 * a debugging check really.
	 */

	while(ct<1000)
	{
		cfg=apic_read(APIC_ICR);
		if(!(cfg&(1<<12)))
			break;
		ct++;
		udelay(10);
	}

	/*
	 * Just pray... there is nothing more we can do
	 */

	if(ct==1000)
		printk("CPU #%d: previous IPI still not cleared after 10ms\n", smp_processor_id());

	/*
	 * Program the APIC to deliver the IPI
	 */

	cfg=apic_read(APIC_ICR2);
	cfg&=0x00FFFFFF;
	apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(target)); /* Target chip */
	cfg=apic_read(APIC_ICR);
	cfg&=~0xFDFFF; /* Clear bits */
	cfg|=APIC_DEST_FIELD|APIC_DEST_DM_FIXED|irq; /* Fixed delivery on our vector */

	/*
	 * Set the target requirement
	 */

	if(target==MSG_ALL_BUT_SELF)
	{
		cfg|=APIC_DEST_ALLBUT;
		target_map=cpu_present_map;
		cpu_callin_map[0]=(1<<smp_src_cpu);
	}
	else if(target==MSG_ALL)
	{
		cfg|=APIC_DEST_ALLINC;
		target_map=cpu_present_map;
		cpu_callin_map[0]=0;
	}
	else
	{
		target_map=(1<<target);
		cpu_callin_map[0]=0;
	}

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */

	apic_write(APIC_ICR, cfg);

	/*
	 * Spin waiting for completion
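	 * (wait==1: until every target has set its bit in cpu_callin_map;
	 *  wait==2: until the invalidate map has been cleared by all targets)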
	 */

	switch(wait)
	{
		case 1:
			while(cpu_callin_map[0]!=target_map); /* Spin on the pass */
			break;
		case 2:
			while(smp_invalidate_needed); /* Wait for invalidate map to clear */
			break;
	}

	/*
	 * Record our completion
	 */

	smp_cpu_in_msg[p]--;
	message_cpu=NO_PROC_ID;
}

/*
 * This is fraught with deadlocks. Linus does an invalidate at a whim
 * even with IRQs off. We have to avoid a pair of crossing invalidates
 * or we are doomed. See the notes about smp_message_pass.
 */

void smp_invalidate(void)
{
	unsigned long flags;
	if(smp_activated && smp_processor_id()!=active_kernel_processor)
		panic("CPU #%d: Attempted invalidate IPI when not AKP(=%d)\n",smp_processor_id(),active_kernel_processor);
	/* printk("SMI-");*/

	/*
	 * The assignment is safe because it's volatile so the compiler cannot reorder it,
	 * because the i586 has strict memory ordering and because only the kernel lock holder
	 * may issue an invalidate. If you break any one of those three, change this to an
	 * atomic bus-locked OR.
	 */

	smp_invalidate_needed=cpu_present_map&~(1<<smp_processor_id());

	/*
	 * Processors spinning on the lock will see this IRQ late. The smp_invalidate_needed
	 * map will ensure they don't do a spurious invalidate or miss one.
	 */

	save_flags(flags);
	cli();
	smp_message_pass(MSG_ALL_BUT_SELF, MSG_INVALIDATE_TLB, 0L, 2);

	/*
	 * Flush the local TLB
	 */

	local_invalidate();

	restore_flags(flags);

	/*
	 * Completed.
	 */

	/* printk("SMID\n");*/
}

/*
 * Reschedule callback.
 */

void smp_reschedule_irq(int cpl, struct pt_regs *regs)
{
	static int ct=0;
	if(ct==0)
	{
		printk("Beginning scheduling on CPU#%d\n",smp_processor_id());
		ct=1;
	}

	if(smp_processor_id()!=active_kernel_processor)
		panic("SMP Reschedule on CPU #%d, but #%d is active.\n",
			smp_processor_id(), active_kernel_processor);
	/*
	 * Update resource usage on the slave timer tick.
	 */

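	/*
	 * This mirrors the per-tick accounting the uniprocessor timer
	 * interrupt normally does in kernel/sched.c, applied here to
	 * whatever the slave was running.
	 */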
	if (user_mode(regs))
	{
		current->utime++;
		if (current->pid)
		{
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* Update ITIMER_VIRT for current task if not in a system call */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current->pid)
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		if (prof_buffer && current->pid) {
			extern int _stext;
			unsigned long eip = regs->eip - (unsigned long) &_stext;
			eip >>= CONFIG_PROFILE_SHIFT;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}
	/*
	 * Check the CPU time limit on the process.
	 */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
	    (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
	    (((current->stime + current->utime) % HZ) == 0)) {
		unsigned long psecs = (current->stime + current->utime) / HZ;
		/* send when equal */
		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);
		/* and every five seconds thereafter. */
		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
		    ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}

	/* Update ITIMER_PROF for the current task */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}


	/*
	 * Don't reschedule if we are in an interrupt...
	 * [This is test code and not needed in the end]
	 */

	/* if(intr_count==1)
	{*/

	/*
	 * See if the slave processors need a schedule.
	 */

	if (--current->counter < 0 || current->pid == 0)
	{
		current->counter = 0;
		need_resched=1;
	}
	/* }*/

	/*
	 * Clear the IPI
	 */
	apic_read(APIC_SPIV); /* Dummy read */
	apic_write(APIC_EOI, 0); /* Docs say use 0 for future compatibility */
}

/*
 * Message callback.
 */

void smp_message_irq(int cpl, struct pt_regs *regs)
{
	int i=smp_processor_id();
	/* static int n=0;
	if(n++<NR_CPUS)
		printk("IPI %d->%d(%d,%ld)\n",smp_src_cpu,i,smp_msg_id,smp_msg_data);*/
	switch(smp_msg_id)
	{
		case 0: /* IRQ 13 testing - boring */
			return;

		/*
		 * A TLB flush is needed.
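		 * Clear our bit first so the sender's wait-for-clear
		 * (wait==2) can complete, then flush and check in.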
		 */

		case MSG_INVALIDATE_TLB:
			if(clear_bit(i,&smp_invalidate_needed))
				local_invalidate();
			set_bit(i, &cpu_callin_map[0]);
			break;

		/*
		 * Halt other CPUs for a panic or reboot
		 */
		case MSG_STOP_CPU:
			while(1)
			{
				if(cpu_data[smp_processor_id()].hlt_works_ok)
					__asm__("hlt");
			}
		default:
			printk("CPU #%d sent invalid cross CPU message to CPU #%d: %X(%lX).\n",
				smp_src_cpu,smp_processor_id(),smp_msg_id,smp_msg_data);
			break;
	}
	/*
	 * Clear the IPI, so we can receive future IPIs
	 */

	apic_read(APIC_SPIV); /* Dummy read */
	apic_write(APIC_EOI, 0); /* Docs say use 0 for future compatibility */
}