1 /*
2 * Intel MP v1.1 specification support routines for multi-pentium
3 * hosts.
4 *
5 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
6 * Supported by Caldera http://www.caldera.com.
7 * Much of the core SMP work is based on previous work by Thomas Radke, to
8 * whom a great many thanks are extended.
9 *
10 * This code is released under the GNU public license version 2 or
11 * later.
12 *
13 * Fixes
14 * Felix Koop: NR_CPUS used properly
15 *
16 */
17
18 #include <linux/kernel.h>
19 #include <linux/string.h>
20 #include <linux/config.h>
21 #include <linux/timer.h>
22 #include <linux/sched.h>
23 #include <linux/mm.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/delay.h>
26 #include <asm/i82489.h>
27 #include <linux/smp.h>
28 #include <asm/bitops.h>
29 #include <asm/smp.h>
30
31 extern void *vremap(unsigned long offset, unsigned long size); /* Linus hasnt put this in the headers yet */
32
33 static int smp_found_config=0; /* Have we found an SMP box */
34
35 unsigned long cpu_present_map = 0; /* Bitmask of existing CPU's */
36 int smp_num_cpus; /* Total count of live CPU's */
37 int smp_threads_ready=0; /* Set when the idlers are all forked */
38 volatile unsigned long cpu_callin_map[NR_CPUS] = {0,}; /* We always use 0 the rest is ready for parallel delivery */
39 volatile unsigned long smp_invalidate_needed; /* Used for the invalidate map thats also checked in the spinlock */
40 struct cpuinfo_x86 cpu_data[NR_CPUS]; /* Per cpu bogomips and other parameters */
41 static unsigned int num_processors = 0; /* Internal processor count */
42 static unsigned long io_apic_addr = 0; /* Address of the I/O apic (not yet used) */
43 unsigned char boot_cpu_id = 0; /* Processor that is doing the boot up */
44 static unsigned char *kstack_base,*kstack_end; /* Kernel stack list pointers */
45 static int smp_activated = 0; /* Tripped once we need to start cross invalidating */
46 static volatile int smp_commenced=0; /* Tripped when we start scheduling */
47 static unsigned char nlong=0; /* Apparent value for boot CPU */
48 unsigned char *apic_reg=((unsigned char *)&nlong)-0x20; /* Later set to the vremap() of the APIC */
49 unsigned long apic_retval; /* Just debugging the assembler.. */
50 unsigned char *kernel_stacks[NR_CPUS]; /* Kernel stack pointers for CPU's (debugging) */
51
52 static volatile unsigned char smp_cpu_in_msg[NR_CPUS]; /* True if this processor is sending an IPI */
53 static volatile unsigned long smp_msg_data; /* IPI data pointer */
54 static volatile int smp_src_cpu; /* IPI sender processor */
55 static volatile int smp_msg_id; /* Message being sent */
56
57 volatile unsigned long kernel_flag=0; /* Kernel spinlock */
58 volatile unsigned char active_kernel_processor = NO_PROC_ID; /* Processor holding kernel spinlock */
59 volatile unsigned long kernel_counter=0; /* Number of times the processor holds the lock */
60 volatile unsigned long syscall_count=0; /* Number of times the processor holds the syscall lock */
61 volatile unsigned long smp_spins=0; /* Count of cycles wasted to spinning */
62
63 volatile unsigned long ipi_count; /* Number of IPI's delivered */
64
/*
 * Checksum an MP configuration block.  Valid MP structures sum, byte
 * by byte, to zero modulo 256; callers test the result for non-zero.
 */

static int mpf_checksum(unsigned char *mp, int len)
{
	int total = 0;
	int i;

	for (i = 0; i < len; i++)
		total += mp[i];
	return total & 0xFF;
}
76
/*
 * Processor encoding in an MP configuration block.
 *
 * Map a family/model pair from a processor entry to a printable CPU
 * name.  For unrecognised pairs a formatted string is built in a
 * static buffer, so the result is only valid until the next call.
 */

static char *mpc_family(int family,int model)
{
	static char n[32];
	static char *model_defs[]=
	{
		"80486DX","80486DX",
		"80486SX","80486DX/2 or 80487",
		"80486SL","Intel5X2(tm)",
		"Unknown","Unknown",
		"80486DX/4"
	};

	switch(family)
	{
		case 0x05:
			return("Pentium(tm)");
		case 0x0F:
			if(model==0x0F)
				return("Special controller");
			break;
		case 0x04:
			if(model<9)
				return model_defs[model];
			break;
	}
	sprintf(n,"Unknown CPU [%d:%d]",family, model);
	return n;
}
101
102 /*
103 * Read the MPC
104 */
105
106 static int smp_read_mpc(struct mp_config_table *mpc)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
107 {
108 char str[16];
109 int count=sizeof(*mpc);
110 int apics=0;
111 unsigned char *mpt=((unsigned char *)mpc)+count;
112
113 if(memcmp(mpc->mpc_signature,MPC_SIGNATURE,4))
114 {
115 printk("Bad signature [%c%c%c%c].\n",
116 mpc->mpc_signature[0],
117 mpc->mpc_signature[1],
118 mpc->mpc_signature[2],
119 mpc->mpc_signature[3]);
120 return 1;
121 }
122 if(mpf_checksum((unsigned char *)mpc,mpc->mpc_length))
123 {
124 printk("Checksum error.\n");
125 return 1;
126 }
127 if(mpc->mpc_spec!=0x01)
128 {
129 printk("Unsupported version (%d)\n",mpc->mpc_spec);
130 return 1;
131 }
132 memcpy(str,mpc->mpc_oem,8);
133 str[8]=0;
134 printk("OEM ID: %s ",str);
135 memcpy(str,mpc->mpc_productid,12);
136 str[12]=0;
137 printk("Product ID: %s ",str);
138 printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
139
140 /*
141 * Now process the configuration blocks.
142 */
143
144 while(count<mpc->mpc_length)
145 {
146 switch(*mpt)
147 {
148 case MP_PROCESSOR:
149 {
150 struct mpc_config_processor *m=
151 (struct mpc_config_processor *)mpt;
152 if(m->mpc_cpuflag&CPU_ENABLED)
153 {
154 printk("Processor #%d %s APIC version %d\n",
155 m->mpc_apicid,
156 mpc_family((m->mpc_cpufeature&
157 CPU_FAMILY_MASK)>>8,
158 (m->mpc_cpufeature&
159 CPU_MODEL_MASK)>>4),
160 m->mpc_apicver);
161 if(m->mpc_featureflag&(1<<0))
162 printk(" Floating point unit present.\n");
163 if(m->mpc_featureflag&(1<<7))
164 printk(" Machine Exception supported.\n");
165 if(m->mpc_featureflag&(1<<8))
166 printk(" 64 bit compare & exchange supported.\n");
167 if(m->mpc_featureflag&(1<<9))
168 printk(" Internal APIC present.\n");
169 if(m->mpc_cpuflag&CPU_BOOTPROCESSOR)
170 {
171 printk(" Bootup CPU\n");
172 boot_cpu_id=m->mpc_apicid;
173 nlong = boot_cpu_id<<24; /* Dummy 'self' for bootup */
174 }
175 else /* Boot CPU already counted */
176 num_processors++;
177
178 if(m->mpc_apicid>NR_CPUS)
179 printk("Processor #%d unused. (Max %d processors).\n",m->mpc_apicid, NR_CPUS);
180 else
181 cpu_present_map|=(1<<m->mpc_apicid);
182 }
183 mpt+=sizeof(*m);
184 count+=sizeof(*m);
185 break;
186 }
187 case MP_BUS:
188 {
189 struct mpc_config_bus *m=
190 (struct mpc_config_bus *)mpt;
191 memcpy(str,m->mpc_bustype,6);
192 str[6]=0;
193 printk("Bus #%d is %s\n",
194 m->mpc_busid,
195 str);
196 mpt+=sizeof(*m);
197 count+=sizeof(*m);
198 break;
199 }
200 case MP_IOAPIC:
201 {
202 struct mpc_config_ioapic *m=
203 (struct mpc_config_ioapic *)mpt;
204 if(m->mpc_flags&MPC_APIC_USABLE)
205 {
206 apics++;
207 printk("I/O APIC #%d Version %d at 0x%lX.\n",
208 m->mpc_apicid,m->mpc_apicver,
209 m->mpc_apicaddr);
210 io_apic_addr = m->mpc_apicaddr;
211 }
212 mpt+=sizeof(*m);
213 count+=sizeof(*m);
214 break;
215 }
216 case MP_INTSRC:
217 {
218 struct mpc_config_intsrc *m=
219 (struct mpc_config_intsrc *)mpt;
220
221 mpt+=sizeof(*m);
222 count+=sizeof(*m);
223 break;
224 }
225 case MP_LINTSRC:
226 {
227 struct mpc_config_intlocal *m=
228 (struct mpc_config_intlocal *)mpt;
229 mpt+=sizeof(*m);
230 count+=sizeof(*m);
231 break;
232 }
233 }
234 }
235 if(apics>1)
236 printk("Warning: Multiple APIC's not supported.\n");
237 return num_processors;
238 }
239
/*
 * Scan the memory blocks for an SMP configuration block.
 *
 * Walks [base, base+length) a paragraph (16 bytes) at a time looking
 * for the MP floating pointer structure (SMP_MAGIC_IDENT).  When a
 * valid one is found, print the advertised default configuration and,
 * if a physical table pointer is present, parse the full MP
 * configuration table via smp_read_mpc().
 */

void smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned long *bp=(unsigned long *)base;
	struct intel_mp_floating *mpf;
	num_processors = 1;		/* The boot processor */

	/* printk("Scan SMP from %p for %ld bytes.\n",
		bp,length);*/
	/* The structure is defined to be exactly one paragraph long. */
	if(sizeof(*mpf)!=16)
		printk("Error: MPF size\n");

	while(length>0)
	{
		if(*bp==SMP_MAGIC_IDENT)
		{
			mpf=(struct intel_mp_floating *)bp;
			/*
			 * A valid floating structure is one paragraph long,
			 * checksums to zero and is specification revision 1.
			 */
			if(mpf->mpf_length==1 &&
				!mpf_checksum((unsigned char *)bp,16) &&
				mpf->mpf_specification==1)
			{
				printk("Intel multiprocessing (MPv1.1) available.\n");
				if(mpf->mpf_feature2&(1<<7))
					printk(" IMCR and PIC mode supported.\n");
				smp_found_config=1;
				/*
				 * Now see if we need to read further.
				 * A non zero feature1 selects one of the
				 * spec's default two-processor configurations
				 * instead of a full configuration table.
				 */
				if(mpf->mpf_feature1!=0)
				{
					num_processors=2;
					printk("I/O APIC at 0xFEC00000.\n");
					printk("Bus#0 is ");
				}
				switch(mpf->mpf_feature1)
				{
					case 1:
						printk("ISA");
						break;
					case 2:
						printk("EISA with no IRQ8 chaining");
						break;
					case 3:
						printk("EISA");
						break;
					case 4:
						printk("MCA");
						break;
					case 5:
						printk("ISA\nBus#1 is PCI");
						break;
					case 6:
						printk("EISA\nBus #1 is PCI");
						break;
					case 7:
						printk("MCA\nBus #1 is PCI");
						break;
					case 0:
						break;
					default:
						printk("???\nUnknown standard configuration %d\n",
							mpf->mpf_feature1);
						return;
				}
				/*
				 * Read the physical hardware table. If there isn't one
				 * the processors present are 0 and 1.
				 */
				if(mpf->mpf_physptr)
					smp_read_mpc((void *)mpf->mpf_physptr);
				else
					cpu_present_map=3;
				printk("Processors: %d\n", num_processors);
			}
		}
		/* Candidates are 16 byte aligned: step one paragraph. */
		bp+=4;
		length-=16;
	}
}
322
/*
 * Trampoline 80x86 program as an array.  This is the real mode image
 * that install_trampoline() copies into each AP's low memory stack
 * page; the STARTUP IPI sent by smp_boot_cpus() makes the AP begin
 * executing it from that page.
 */

static unsigned char trampoline_data[]={
#include "trampoline.hex"
};
330
331 /*
332 * Currently trivial. Write the real->protected mode
333 * bootstrap into the page concerned. The caller
334 * has made sure its suitably aligned.
335 */
336
337 static void install_trampoline(unsigned char *mp)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
338 {
339 memcpy(mp,trampoline_data,sizeof(trampoline_data));
340 }
341
342 /*
343 * We are called very early to get the low memory for the trampoline/kernel stacks
344 * This has to be done by mm/init.c to parcel us out nice low memory. We allocate
345 * the kernel stacks at 4K, 8K, 12K... currently (0-03FF is preserved for SMM and
346 * other things).
347 */
348
349 unsigned long smp_alloc_memory(unsigned long mem_base)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
350 {
351 int size=(num_processors-1)*PAGE_SIZE; /* Number of stacks needed */
352 /*
353 * Our stacks have to be below the 1Mb line, and mem_base on entry
354 * is 4K aligned.
355 */
356
357 if(mem_base+size>=0x9F000)
358 panic("smp_alloc_memory: Insufficient low memory for kernel stacks.\n");
359 kstack_base=(void *)mem_base;
360 mem_base+=size;
361 kstack_end=(void *)mem_base;
362 return mem_base;
363 }
364
365 /*
366 * Hand out stacks one at a time.
367 */
368
369 static void *get_kernel_stack(void)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
370 {
371 void *stack=kstack_base;
372 if(kstack_base>=kstack_end)
373 return NULL;
374 kstack_base+=PAGE_SIZE;
375 return stack;
376 }
377
378
/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU: copy the boot-time CPU detection globals into this
 * processor's cpu_data[] slot.
 */

void smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c=&cpu_data[id];
	c->hard_math=hard_math;	/* Always assumed same currently */
	c->x86=x86;
	c->x86_model=x86_model;
	c->x86_mask=x86_mask;
	c->x86_capability=x86_capability;
	c->fdiv_bug=fdiv_bug;
	c->wp_works_ok=wp_works_ok;	/* Always assumed the same currently */
	c->hlt_works_ok=hlt_works_ok;
	c->udelay_val=loops_per_sec;	/* Bogomips figure from calibrate_delay() */
	strcpy(c->x86_vendor_id, x86_vendor_id);
}
398
/*
 * Architecture specific routine called by the kernel just before init is
 * fired off. This allows the BP to have everything in order [we hope].
 * At the end of this all the AP's will hit the system scheduling and off
 * we go. Each AP will load the system gdt's and jump through the kernel
 * init into idle(). At this point the scheduler will one day take over
 * and give them jobs to do. smp_callin is a standard routine
 * we use to track CPU's as they power up.
 */

void smp_commence(void)
{
	/*
	 * Lets the callin's below out of their loop: each AP is spinning
	 * on smp_commenced in smp_callin().
	 */
	smp_commenced=1;
}
416
/*
 * Entry point for each AP after the trampoline and kernel bootstrap.
 * Enables the local APIC, calibrates the delay loop, saves its CPU
 * data, announces itself in cpu_callin_map and then spins until
 * smp_commence() releases it.
 */
void smp_callin(void)
{
	int cpuid=GET_APIC_ID(apic_read(APIC_ID));
	unsigned long l;
	/*
	 * Activate our APIC
	 */

	/* printk("CALLIN %d\n",smp_processor_id());*/
	l=apic_read(APIC_SPIV);
	l|=(1<<8);		/* Enable bit in the spurious vector register */
	apic_write(APIC_SPIV,l);
	sti();
	/*
	 * Get our bogomips.
	 */
	calibrate_delay();
	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);
	/*
	 * Allow the master to continue: smp_boot_cpus() polls this bit.
	 */
	set_bit(cpuid, &cpu_callin_map[0]);
	/*
	 * Until we are ready for SMP scheduling
	 */
	load_ldt(0);
	/* printk("Testing faulting...\n");
	*(long *)0=1;		 OOPS... */
	local_invalidate();
	while(!smp_commenced);		/* Released by smp_commence() on the BP */
	local_invalidate();
	/* printk("Commenced..\n");*/

	/* This assumes the processor id's are consecutive 0..n-1 - FIXME */
	load_TR(cpuid);
	/* while(1);*/
}
457
/*
 * Cycle through the processors sending pentium IPI's to boot each.
 *
 * Maps the local APIC, records the boot processor's details, then for
 * each other CPU in cpu_present_map: installs the trampoline in a low
 * memory stack page and fires a STARTUP IPI at it.  Each woken AP
 * announces itself via cpu_callin_map (see smp_callin()); CPUs that
 * fail to respond are removed from cpu_present_map.
 */

void smp_boot_cpus(void)
{
	int i=0;
	int cpucount=0;
	void *stack;
	extern unsigned long init_user_stack[];

	/*
	 * Map the local APIC into kernel space
	 */

	apic_reg = vremap(0xFEE00000,4096);

	if(apic_reg == NULL)
		panic("Unable to map local apic.\n");

	/*
	 * Now scan the cpu present map and fire up anything we find.
	 */

	kernel_stacks[boot_cpu_id]=(void *)init_user_stack;	/* Set up for boot processor first */

	smp_store_cpu_info(boot_cpu_id);	/* Final full version of the data */

	active_kernel_processor=boot_cpu_id;

	for(i=0;i<NR_CPUS;i++)
	{
		if((cpu_present_map&(1<<i)) && i!=boot_cpu_id)	/* Rebooting yourself is a bad move */
		{
			unsigned long cfg;
			int timeout;

			/*
			 * We need a kernel stack for each processor.
			 */

			stack=get_kernel_stack();	/* We allocated these earlier */
			if(stack==NULL)
				panic("No memory for processor stacks.\n");
			kernel_stacks[i]=stack;
			install_trampoline(stack);

			printk("Booting processor %d stack %p: ",i,stack);	/* So we set whats up */

			/*
			 * Enable the local APIC
			 */

			cfg=apic_read(APIC_SPIV);
			cfg|=(1<<8);		/* Enable APIC */
			apic_write(APIC_SPIV,cfg);

			/*
			 * This gunge sends an IPI (Inter Processor Interrupt) to the
			 * processor we wish to wake. When the startup IPI is received
			 * the target CPU does a real mode jump to the stack base.
			 *
			 * The startup vector is the stack's page number, so the
			 * stack page doubles as the AP's real mode entry point.
			 */

			cfg=apic_read(APIC_ICR2);
			cfg&=0x00FFFFFF;
			apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));	/* Target chip */
			cfg=apic_read(APIC_ICR);
			cfg&=~0xFDFFF ;		/* Clear bits */
			cfg|=APIC_DEST_FIELD|APIC_DEST_DM_STARTUP|(((unsigned long)stack)>>12);	/* Boot on the stack */
			apic_write(APIC_ICR, cfg);	/* Kick the second */
			udelay(10);			/* Masses of time */
			cfg=apic_read(APIC_ESR);
			if(cfg&4)		/* Send accept error */
				printk("Processor refused startup request.\n");
			else
			{
				for(timeout=0;timeout<50000;timeout++)
				{
					if(cpu_callin_map[0]&(1<<i))
						break;			/* It has booted */
					udelay(100);			/* Wait 5s total for a response */
				}
				if(cpu_callin_map[0]&(1<<i))
					cpucount++;
				else
				{
					/*
					 * At this point we should set up a BIOS warm start and try
					 * a RESTART IPI. The 486+82489 MP pair don't support STARTUP IPI's
					 */
					/* NOTE(review): 0xA5 at address 8192 appears to be a
					   marker the trampoline leaves once it starts executing -
					   confirm against trampoline.hex. */
					if(*((unsigned char *)8192)==0xA5)
						printk("Stuck ??\n");
					else
						printk("Not responding.\n");
					cpu_present_map&=~(1<<i);
				}
			}
		}
	}
	/*
	 * Allow the user to impress friends.
	 */
	if(cpucount==0)
		printk("Error: only one processor found.\n");
	else
	{
		printk("Total of %d processors activated.\n", cpucount+1);
		smp_activated=1;
		smp_num_cpus=cpucount+1;
	}
}
571
572
/*
 * A non wait message cannot pass data or cpu source info. This current setup
 * is only safe because the kernel lock owner is the only person who can send a message.
 *
 * Wrapping this whole block in a spinlock is not the safe answer either. A processor may
 * get stuck with irq's off waiting to send a message and thus not replying to the person
 * spinning for a reply....
 *
 * In the end invalidate ought to be the NMI and a very very short function (to avoid the old
 * IDE disk problems), and other messages sent with IRQ's enabled in a civilised fashion. That
 * will also boost performance.
 */

/*
 * Send a cross-CPU message.
 *
 * target:	a CPU number, or MSG_ALL / MSG_ALL_BUT_SELF
 * msg:		message id; MSG_RESCHEDULE goes via trap 0x30, all
 *		other messages via the IRQ 13 vector (0x2d)
 * data:	payload stored in smp_msg_data for the receiver
 * wait:	0 = fire and forget, 1 = spin until every target has
 *		checked in via cpu_callin_map, 2 = spin until
 *		smp_invalidate_needed clears
 */
void smp_message_pass(int target, int msg, unsigned long data, int wait)
{
	unsigned long cfg;
	unsigned long target_map;
	int p=smp_processor_id();
	int irq=0x2d;				/* IRQ 13 */
	int ct=0;
	static volatile int message_cpu = NO_PROC_ID;

	/*
	 * During boot up send no messages
	 */

	if(!smp_activated)
		return;


	/*
	 * Skip the reschedule if we are waiting to clear a
	 * message at this time. The reschedule cannot wait
	 * but is not critical.
	 */

	if(msg==MSG_RESCHEDULE)			/* Reschedules we do via trap 0x30 */
	{
		irq=0x30;
		if(smp_cpu_in_msg[p])
			return;
	}

	/*
	 * Sanity check we don't re-enter this across CPU's. Only the kernel
	 * lock holder may send messages. For a STOP_CPU we are bringing the
	 * entire box to the fastest halt we can..
	 */

	if(message_cpu!=NO_PROC_ID && msg!=MSG_STOP_CPU)
	{
		panic("CPU #%d: Message pass %d but pass in progress by %d of %d\n",
			smp_processor_id(),msg,message_cpu, smp_msg_id);
	}
	message_cpu=smp_processor_id();


	/*
	 * We are busy
	 */

	smp_cpu_in_msg[p]++;

	/*
	 * Reschedule is currently special: it carries no payload, so the
	 * message globals are left alone.
	 */

	if(msg!=MSG_RESCHEDULE)
	{
		smp_src_cpu=p;
		smp_msg_id=msg;
		smp_msg_data=data;
	}

	/* printk("SMP message pass #%d to %d of %d\n",
		p, msg, target);*/

	/*
	 * Wait for the APIC to become ready - this should never occur. Its
	 * a debugging check really.
	 */

	while(ct<1000)
	{
		cfg=apic_read(APIC_ICR);
		if(!(cfg&(1<<12)))		/* Previous IPI no longer pending */
			break;
		ct++;
		udelay(10);
	}

	/*
	 * Just pray... there is nothing more we can do
	 */

	if(ct==1000)
		printk("CPU #%d: previous IPI still not cleared after 10mS", smp_processor_id());

	/*
	 * Program the APIC to deliver the IPI
	 */

	cfg=apic_read(APIC_ICR2);
	cfg&=0x00FFFFFF;
	apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(target));	/* Target chip */
	cfg=apic_read(APIC_ICR);
	cfg&=~0xFDFFF;				/* Clear bits */
	cfg|=APIC_DEST_FIELD|APIC_DEST_DM_FIXED|irq;	/* Send an IRQ 13 */

	/*
	 * Set the target requirement
	 */

	if(target==MSG_ALL_BUT_SELF)
	{
		cfg|=APIC_DEST_ALLBUT;
		target_map=cpu_present_map;
		cpu_callin_map[0]=(1<<smp_src_cpu);	/* Pre-mark the sender as done */
	}
	else if(target==MSG_ALL)
	{
		cfg|=APIC_DEST_ALLINC;
		target_map=cpu_present_map;
		cpu_callin_map[0]=0;
	}
	else
	{
		target_map=(1<<target);
		cpu_callin_map[0]=0;
	}

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */

	apic_write(APIC_ICR, cfg);

	/*
	 * Spin waiting for completion
	 */

	switch(wait)
	{
		case 1:
			while(cpu_callin_map[0]!=target_map);		/* Spin on the pass */
			break;
		case 2:
			while(smp_invalidate_needed);			/* Wait for invalidate map to clear */
			break;
	}

	/*
	 * Record our completion
	 */

	smp_cpu_in_msg[p]--;
	message_cpu=NO_PROC_ID;
}
731
/*
 * This is fraught with deadlocks. Linus does an invalidate at a whim
 * even with IRQ's off. We have to avoid a pair of crossing invalidates
 * or we are doomed. See the notes about smp_message_pass.
 */

/*
 * Flush the TLB on every processor.  Only the holder of the kernel
 * lock (the active kernel processor) may call this once SMP is up.
 */
void smp_invalidate(void)
{
	unsigned long flags;
	if(smp_activated && smp_processor_id()!=active_kernel_processor)
		panic("CPU #%d:Attempted invalidate IPI when not AKP(=%d)\n",smp_processor_id(),active_kernel_processor);
	/* printk("SMI-");*/

	/*
	 * The assignment is safe because its volatile so the compiler cannot reorder it,
	 * because the i586 has strict memory ordering and because only the kernel lock holder
	 * may issue an invalidate. If you break any one of those three change this to an atomic
	 * bus locked or.
	 */

	smp_invalidate_needed=cpu_present_map&~(1<<smp_processor_id());

	/*
	 * Processors spinning on the lock will see this IRQ late. The smp_invalidate_needed map will
	 * ensure they dont do a spurious invalidate or miss one.
	 */

	save_flags(flags);
	cli();
	/*
	 * wait==2: spin until each other CPU clears its bit in
	 * smp_invalidate_needed (done in smp_message_irq()).
	 */
	smp_message_pass(MSG_ALL_BUT_SELF, MSG_INVALIDATE_TLB, 0L, 2);

	/*
	 * Flush the local TLB
	 */

	local_invalidate();

	restore_flags(flags);

	/*
	 * Completed.
	 */

	/* printk("SMID\n");*/
}
777
/*
 * Reschedule call back (trap 0x30).  This also serves as the slave
 * processor's timer tick: it performs the per-process time accounting
 * normally done by the timer interrupt, enforces the RLIMIT_CPU
 * limits and the interval timers, then flags a reschedule when the
 * current task's quantum is used up.  Finally the IPI is acknowledged.
 */
void smp_reschedule_irq(int cpl, struct pt_regs *regs)
{
	static int ct=0;	/* One-shot "scheduling has begun" announcement */
	if(ct==0)
	{
		printk("Beginning scheduling on CPU#%d\n",smp_processor_id());
		ct=1;
	}

	if(smp_processor_id()!=active_kernel_processor)
		panic("SMP Reschedule on CPU #%d, but #%d is active.\n",
			smp_processor_id(), active_kernel_processor);
	/*
	 * Update resource usage on the slave timer tick.
	 */

	if (user_mode(regs))
	{
		current->utime++;
		if (current->pid)
		{
			/* priority below 15 is accounted as nice time */
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* Update ITIMER_VIRT for current task if not in a system call */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current->pid)
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* Kernel profiling: bucket the interrupted kernel EIP */
		if (prof_buffer && current->pid) {
			extern int _stext;
			unsigned long eip = regs->eip - (unsigned long) &_stext;
			eip >>= CONFIG_PROFILE_SHIFT;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}
	/*
	 * check the cpu time limit on the process.
	 */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
	    (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
	    (((current->stime + current->utime) % HZ) == 0)) {
		unsigned long psecs = (current->stime + current->utime) / HZ;
		/* send when equal */
		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);
		/* and every five seconds thereafter. */
		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
		        ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}

	/* Update ITIMER_PROF for the current task */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}


	/*
	 * Don't reschedule if we are in an interrupt...
	 * [This is test code and not needed in the end]
	 */

	/* if(intr_count==1)
	{*/

	/*
	 * See if the slave processors need a schedule.
	 */

	if ( 0 > --current->counter || current->pid == 0)
	{
		current->counter = 0;
		need_resched=1;
	}
	/* }*/

	/*
	 * Clear the IPI
	 */
	apic_read(APIC_SPIV);		/* Dummy read */
	apic_write(APIC_EOI, 0);	/* Docs say use 0 for future compatibility */
}
877
/*
 * Message call back.  IRQ 13 handler: decode the message id left in
 * smp_msg_id by smp_message_pass() on the sending processor, act on
 * it, then acknowledge the IPI.
 */

void smp_message_irq(int cpl, struct pt_regs *regs)
{
	static int n=0;		/* Only used by the commented-out debugging below */
	int i=smp_processor_id();
	/* if(n++<NR_CPUS)
		printk("IPI %d->%d(%d,%ld)\n",smp_src_cpu,i,smp_msg_id,smp_msg_data);*/
	switch(smp_msg_id)
	{
		case 0:	/* IRQ 13 testing - boring */
			return;

		/*
		 * A TLB flush is needed.
		 */

		case MSG_INVALIDATE_TLB:
			/*
			 * Only flush if our bit is still set, so a late
			 * IRQ cannot trigger a spurious invalidate.
			 */
			if(clear_bit(i,&smp_invalidate_needed))
				local_invalidate();
			/*
			 * Check in with the sender.  (The explicit OR is
			 * redundant with the set_bit above but harmless.)
			 */
			set_bit(i, &cpu_callin_map[0]);
			cpu_callin_map[0]|=1<<smp_processor_id();
			break;

		/*
		 * Halt other CPU's for a panic or reboot.  Never returns:
		 * spin (or hlt, when it works on this CPU) forever.
		 */
		case MSG_STOP_CPU:
			while(1)
			{
				if(cpu_data[smp_processor_id()].hlt_works_ok)
					__asm__("hlt");
			}
		default:
			printk("CPU #%d sent invalid cross CPU message to CPU #%d: %X(%lX).\n",
				smp_src_cpu,smp_processor_id(),smp_msg_id,smp_msg_data);
			break;
	}
	/*
	 * Clear the IPI, so we can receive future IPI's
	 */

	apic_read(APIC_SPIV);		/* Dummy read */
	apic_write(APIC_EOI, 0);	/* Docs say use 0 for future compatibility */
}