This source file includes the following definitions:
- mpf_checksum
- mpc_family
- smp_read_mpc
- smp_scan_config
- install_trampoline
- smp_alloc_memory
- get_kernel_stack
- smp_store_cpu_info
- smp_commence
- smp_callin
- smp_boot_cpus
- smp_message_pass
- smp_invalidate
- smp_reschedule_irq
- smp_message_irq
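
/*
 *	Intel MP v1.1/v1.4 specification support routines for multi-processor
 *	x86 bootup and cross-CPU message passing.
 */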
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/i82489.h>
#include <linux/smp.h>
#include <asm/pgtable.h>
#include <asm/bitops.h>
#include <asm/smp.h>

int smp_found_config=0;

unsigned long cpu_present_map = 0;
int smp_num_cpus;
int smp_threads_ready=0;
volatile int cpu_number_map[NR_CPUS];
volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
volatile unsigned long smp_invalidate_needed;
struct cpuinfo_x86 cpu_data[NR_CPUS];
static unsigned int num_processors = 1;
static unsigned long io_apic_addr = 0;
unsigned char boot_cpu_id = 0;
static unsigned char *kstack_base,*kstack_end;
static int smp_activated = 0;
int apic_version[NR_CPUS];
static volatile int smp_commenced=0;
unsigned long apic_addr=0xFEE00000;
unsigned long nlong = 0;
unsigned char *apic_reg=((unsigned char *)(&nlong))-0x20;
unsigned long apic_retval;
unsigned char *kernel_stacks[NR_CPUS];

static volatile unsigned char smp_cpu_in_msg[NR_CPUS];
static volatile unsigned long smp_msg_data;
static volatile int smp_src_cpu;
static volatile int smp_msg_id;

volatile unsigned long kernel_flag=0;
volatile unsigned char active_kernel_processor = NO_PROC_ID;
volatile unsigned long kernel_counter=0;
volatile unsigned long syscall_count=0;

volatile unsigned long ipi_count;
#ifdef __SMP_PROF__
volatile unsigned long smp_spins[NR_CPUS]={0};
volatile unsigned long smp_spins_syscall[NR_CPUS]={0};
volatile unsigned long smp_spins_syscall_cur[NR_CPUS]={0};
volatile unsigned long smp_spins_sys_idle[NR_CPUS]={0};
volatile unsigned long smp_idle_count[1+NR_CPUS]={0,};
#endif
#if defined (__SMP_PROF__)
volatile unsigned long smp_idle_map=0;
#endif

volatile unsigned long smp_proc_in_lock[NR_CPUS] = {0,};
volatile unsigned long smp_process_available=0;

#ifdef SMP_DEBUG
#define SMP_PRINTK(x) printk x
#else
#define SMP_PRINTK(x)
#endif

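/*
 *	Checksum an MP configuration block.
 */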
static int mpf_checksum(unsigned char *mp, int len)
{
	int sum=0;
	while(len--)
		sum+=*mp++;
	return sum&0xFF;
}

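/*
 *	Return a readable name for the CPU family/model reported in the
 *	MP configuration table.
 */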
static char *mpc_family(int family,int model)
{
	static char n[32];
	static char *model_defs[]=
	{
		"80486DX","80486DX",
		"80486SX","80486DX/2 or 80487",
		"80486SL","Intel5X2(tm)",
		"Unknown","Unknown",
		"80486DX/4"
	};
	if(family==0x6)
		return("Pentium(tm) Pro");
	if(family==0x5)
		return("Pentium(tm)");
	if(family==0x0F && model==0x0F)
		return("Special controller");
	if(family==0x04 && model<9)
		return model_defs[model];
	sprintf(n,"Unknown CPU [%d:%d]",family, model);
	return n;
}

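/*
 *	Read the MP configuration table: count the enabled processors and
 *	record the local APIC and I/O APIC addresses.
 */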
static int smp_read_mpc(struct mp_config_table *mpc)
{
	char str[16];
	int count=sizeof(*mpc);
	int apics=0;
	unsigned char *mpt=((unsigned char *)mpc)+count;

	if(memcmp(mpc->mpc_signature,MPC_SIGNATURE,4))
	{
		printk("Bad signature [%c%c%c%c].\n",
			mpc->mpc_signature[0],
			mpc->mpc_signature[1],
			mpc->mpc_signature[2],
			mpc->mpc_signature[3]);
		return 1;
	}
	if(mpf_checksum((unsigned char *)mpc,mpc->mpc_length))
	{
		printk("Checksum error.\n");
		return 1;
	}
	if(mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04)
	{
		printk("Bad Config Table version (%d)!!\n",mpc->mpc_spec);
		return 1;
	}
	memcpy(str,mpc->mpc_oem,8);
	str[8]=0;
	printk("OEM ID: %s ",str);
	memcpy(str,mpc->mpc_productid,12);
	str[12]=0;
	printk("Product ID: %s ",str);
	printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
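
	/* Save the local APIC address; it may differ from the default. */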
	apic_addr = mpc->mpc_lapic;

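	/*
	 *	Now process the configuration blocks.
	 */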
	while(count<mpc->mpc_length)
	{
		switch(*mpt)
		{
			case MP_PROCESSOR:
			{
				struct mpc_config_processor *m=
					(struct mpc_config_processor *)mpt;
				if(m->mpc_cpuflag&CPU_ENABLED)
				{
					printk("Processor #%d %s APIC version %d\n",
						m->mpc_apicid,
						mpc_family((m->mpc_cpufeature&
							CPU_FAMILY_MASK)>>8,
							(m->mpc_cpufeature&
							CPU_MODEL_MASK)>>4),
						m->mpc_apicver);
#ifdef SMP_DEBUG
					if(m->mpc_featureflag&(1<<0))
						printk(" Floating point unit present.\n");
					if(m->mpc_featureflag&(1<<7))
						printk(" Machine Exception supported.\n");
					if(m->mpc_featureflag&(1<<8))
						printk(" 64 bit compare & exchange supported.\n");
					if(m->mpc_featureflag&(1<<9))
						printk(" Internal APIC present.\n");
#endif
					if(m->mpc_cpuflag&CPU_BOOTPROCESSOR)
					{
						SMP_PRINTK((" Bootup CPU\n"));
						boot_cpu_id=m->mpc_apicid;
						nlong = boot_cpu_id<<24;
					}
					else
						num_processors++;

					if(m->mpc_apicid>=NR_CPUS)
						printk("Processor #%d unused. (Max %d processors).\n",m->mpc_apicid, NR_CPUS);
					else
					{
						cpu_present_map|=(1<<m->mpc_apicid);
						apic_version[m->mpc_apicid]=m->mpc_apicver;
					}
				}
				mpt+=sizeof(*m);
				count+=sizeof(*m);
				break;
			}
			case MP_BUS:
			{
				struct mpc_config_bus *m=
					(struct mpc_config_bus *)mpt;
				memcpy(str,m->mpc_bustype,6);
				str[6]=0;
				SMP_PRINTK(("Bus #%d is %s\n",
					m->mpc_busid,
					str));
				mpt+=sizeof(*m);
				count+=sizeof(*m);
				break;
			}
			case MP_IOAPIC:
			{
				struct mpc_config_ioapic *m=
					(struct mpc_config_ioapic *)mpt;
				if(m->mpc_flags&MPC_APIC_USABLE)
				{
					apics++;
					printk("I/O APIC #%d Version %d at 0x%lX.\n",
						m->mpc_apicid,m->mpc_apicver,
						m->mpc_apicaddr);
					io_apic_addr = m->mpc_apicaddr;
				}
				mpt+=sizeof(*m);
				count+=sizeof(*m);
				break;
			}
			case MP_INTSRC:
			{
				struct mpc_config_intsrc *m=
					(struct mpc_config_intsrc *)mpt;

				mpt+=sizeof(*m);
				count+=sizeof(*m);
				break;
			}
			case MP_LINTSRC:
			{
				struct mpc_config_intlocal *m=
					(struct mpc_config_intlocal *)mpt;
				mpt+=sizeof(*m);
				count+=sizeof(*m);
				break;
			}
		}
	}
	if(apics>1)
		printk("Warning: Multiple APICs not supported.\n");
	return num_processors;
}

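/*
 *	Scan a block of low memory for the MP floating pointer signature
 *	("_MP_") and, when found, read the attached configuration.
 */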
void smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned long *bp=(unsigned long *)base;
	struct intel_mp_floating *mpf;

	SMP_PRINTK(("Scan SMP from %p for %ld bytes.\n",
		bp,length));
	if(sizeof(*mpf)!=16)
		printk("Error: MPF size\n");

	while(length>0)
	{
		if(*bp==SMP_MAGIC_IDENT)
		{
			mpf=(struct intel_mp_floating *)bp;
			if(mpf->mpf_length==1 &&
				!mpf_checksum((unsigned char *)bp,16) &&
				(mpf->mpf_specification == 1
				 || mpf->mpf_specification == 4) )
			{
				printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
				if(mpf->mpf_feature2&(1<<7))
					printk(" IMCR and PIC compatibility mode.\n");
				else
					printk(" Virtual Wire compatibility mode.\n");
				smp_found_config=1;

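				/*
				 *	A non-zero default configuration type means
				 *	there is no MP table: assume one of the
				 *	standard two processor configurations.
				 */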
				if(mpf->mpf_feature1!=0)
				{
					num_processors=2;
					printk("I/O APIC at 0xFEC00000.\n");
					printk("Bus#0 is ");
				}
				switch(mpf->mpf_feature1)
				{
					case 1:
						printk("ISA");
						break;
					case 2:
						printk("EISA with no IRQ8 chaining");
						break;
					case 3:
						printk("EISA");
						break;
					case 4:
						printk("MCA");
						break;
					case 5:
						printk("ISA\nBus#1 is PCI");
						break;
					case 6:
						printk("EISA\nBus #1 is PCI");
						break;
					case 7:
						printk("MCA\nBus #1 is PCI");
						break;
					case 0:
						break;
					default:
						printk("???\nUnknown standard configuration %d\n",
							mpf->mpf_feature1);
						return;
				}

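				/*
				 *	Read the physical hardware table, if any.
				 *	Anything here overrides the defaults.
				 */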
				if(mpf->mpf_physptr)
					smp_read_mpc((void *)mpf->mpf_physptr);
				else
					cpu_present_map=3;
				printk("Processors: %d\n", num_processors);

				return;
			}
		}
		bp+=4;
		length-=16;
	}
}

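/*
 *	The 80x86 trampoline code as a byte array: the real mode code a
 *	secondary CPU runs first after it is started.
 */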
static unsigned char trampoline_data[]={
#include "trampoline.hex"
};

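/*
 *	Copy the trampoline code to the page the target CPU will start
 *	executing from.
 */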
static void install_trampoline(unsigned char *mp)
{
	memcpy(mp,trampoline_data,sizeof(trampoline_data));
}

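/*
 *	Reserve low memory for the secondary processors' kernel stacks and
 *	boot trampolines; returns the updated memory base.
 */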
unsigned long smp_alloc_memory(unsigned long mem_base)
{
	int size=(num_processors-1)*PAGE_SIZE;

	if(mem_base+size>=0x9F000)
		panic("smp_alloc_memory: Insufficient low memory for kernel stacks.\n");
	kstack_base=(void *)mem_base;
	mem_base+=size;
	kstack_end=(void *)mem_base;
	return mem_base;
}

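/*
 *	Hand out one of the reserved stack pages, or NULL when none are left.
 */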
static void *get_kernel_stack(void)
{
	void *stack=kstack_base;
	if(kstack_base>=kstack_end)
		return NULL;
	kstack_base+=PAGE_SIZE;
	return stack;
}

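/*
 *	The bootstrap kernel entry code has set these up. Save them for a
 *	given CPU.
 */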
void smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c=&cpu_data[id];
	c->hard_math=hard_math;
	c->x86=x86;
	c->x86_model=x86_model;
	c->x86_mask=x86_mask;
	c->x86_capability=x86_capability;
	c->fdiv_bug=fdiv_bug;
	c->wp_works_ok=wp_works_ok;
	c->hlt_works_ok=hlt_works_ok;
	c->udelay_val=loops_per_sec;
	strcpy(c->x86_vendor_id, x86_vendor_id);
}

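/*
 *	Called once the kernel is fully booted: lets the secondary CPUs
 *	spinning in smp_callin() continue.
 */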
void smp_commence(void)
{
	smp_commenced=1;
}

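/*
 *	Entered by a secondary processor from the trampoline code: enable
 *	the local APIC, calibrate the delay loop, record this CPU's data
 *	and then wait for smp_commence().
 */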
void smp_callin(void)
{
	int cpuid=GET_APIC_ID(apic_read(APIC_ID));
	unsigned long l;

	SMP_PRINTK(("CALLIN %d\n",smp_processor_id()));
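
	/*
	 *	Enable our own local APIC: set the enable bit (bit 8) in the
	 *	spurious interrupt vector register.
	 */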
	l=apic_read(APIC_SPIV);
	l|=(1<<8);
	apic_write(APIC_SPIV,l);
	sti();

	calibrate_delay();

	smp_store_cpu_info(cpuid);

	set_bit(cpuid, (unsigned long *)&cpu_callin_map[0]);

	load_ldt(0);

	local_invalidate();
	while(!smp_commenced);
	if (cpu_number_map[cpuid] == -1)
		while(1);
	local_invalidate();
	SMP_PRINTK(("Commenced..\n"));

	load_TR(cpu_number_map[cpuid]);
}

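/*
 *	Cycle through the processors, booting each secondary CPU with an
 *	APIC INIT/STARTUP sequence.
 */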
void smp_boot_cpus(void)
{
	int i,j;
	int cpucount=0;
	unsigned long cfg;
	void *stack;
	extern unsigned long init_user_stack[];

	for (i = 0; i < NR_CPUS; i++)
		cpu_number_map[i] = -1;

	kernel_stacks[boot_cpu_id]=(void *)init_user_stack;

	smp_store_cpu_info(boot_cpu_id);

	cpu_present_map |= (1 << smp_processor_id());
	cpu_number_map[boot_cpu_id] = 0;
	active_kernel_processor=boot_cpu_id;

	if (!smp_found_config)
		return;

	apic_reg = vremap(apic_addr,4096);

	if(apic_reg == NULL)
		panic("Unable to map local apic.\n");

#ifdef SMP_DEBUG
	{
		int reg;

		reg = apic_read(APIC_VERSION);
		SMP_PRINTK(("Getting VERSION: %x\n", reg));

		apic_write(APIC_VERSION, 0);
		reg = apic_read(APIC_VERSION);
		SMP_PRINTK(("Getting VERSION: %x\n", reg));

		reg = apic_read(APIC_LVT0);
		SMP_PRINTK(("Getting LVT0: %x\n", reg));

		reg = apic_read(APIC_LVT1);
		SMP_PRINTK(("Getting LVT1: %x\n", reg));
	}
#endif

	cfg=apic_read(APIC_SPIV);
	cfg|=(1<<8);
	apic_write(APIC_SPIV,cfg);

	udelay(10);

	SMP_PRINTK(("CPU map: %lx\n", cpu_present_map));

	for(i=0;i<NR_CPUS;i++)
	{
		if (i == boot_cpu_id)
			continue;

		if (cpu_present_map & (1 << i))
		{
			unsigned long send_status, accept_status;
			int timeout, num_starts;

			stack=get_kernel_stack();
			if(stack==NULL)
				panic("No memory for processor stacks.\n");
			kernel_stacks[i]=stack;
			install_trampoline(stack);

			printk("Booting processor %d stack %p: ",i,stack);

			SMP_PRINTK(("Setting warm reset code and vector.\n"));

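			/*
			 *	Point the BIOS warm reset vector (40:67) at the
			 *	trampoline and set the CMOS shutdown code to 0xA
			 *	(jump via 40:67); page 0 is made writable for a
			 *	moment so the BIOS data area can be updated.
			 */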
			cfg=pg0[0];

			CMOS_WRITE(0xa, 0xf);
			pg0[0]=7;
			local_invalidate();
			*((volatile unsigned short *) 0x469) = ((unsigned long)stack)>>4;
			*((volatile unsigned short *) 0x467) = 0;

			pg0[0]= cfg;
			local_invalidate();

			apic_write(APIC_ESR, 0);
			accept_status = (apic_read(APIC_ESR) & 0xEF);

			send_status = 0;
			accept_status = 0;

			SMP_PRINTK(("Asserting INIT.\n"));

			cfg=apic_read(APIC_ICR2);
			cfg&=0x00FFFFFF;
			apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));
			cfg=apic_read(APIC_ICR);
			cfg&=~0xCDFFF;
			cfg |= (APIC_DEST_FIELD | APIC_DEST_LEVELTRIG
				| APIC_DEST_ASSERT | APIC_DEST_DM_INIT);
			apic_write(APIC_ICR, cfg);

			udelay(200);
			SMP_PRINTK(("Deasserting INIT.\n"));

			cfg=apic_read(APIC_ICR2);
			cfg&=0x00FFFFFF;
			apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));
			cfg=apic_read(APIC_ICR);
			cfg&=~0xCDFFF;
			cfg |= (APIC_DEST_FIELD | APIC_DEST_LEVELTRIG
				| APIC_DEST_DM_INIT);
			apic_write(APIC_ICR, cfg);

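			/*
			 *	Only CPUs with an integrated local APIC (version
			 *	0x1X) need STARTUP IPIs; an external 82489DX APIC
			 *	starts from the warm reset vector alone.
			 */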
			if ( apic_version[i] & 0xF0 )
				num_starts = 2;
			else
				num_starts = 0;

			for (j = 0; !(send_status || accept_status)
				&& (j < num_starts) ; j++)
			{
				SMP_PRINTK(("Sending STARTUP #%d.\n",j));

				apic_write(APIC_ESR, 0);

				cfg=apic_read(APIC_ICR2);
				cfg&=0x00FFFFFF;
				apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));
				cfg=apic_read(APIC_ICR);
				cfg&=~0xCDFFF;
				cfg |= (APIC_DEST_FIELD
					| APIC_DEST_DM_STARTUP
					| (((int) stack) >> 12) );
				apic_write(APIC_ICR, cfg);

				timeout = 0;
				do {
					udelay(10);
				} while ( (send_status = (apic_read(APIC_ICR) & 0x1000))
					&& (timeout++ < 1000));
				udelay(200);

				accept_status = (apic_read(APIC_ESR) & 0xEF);
			}

			if (send_status)
				printk("APIC never delivered???\n");
			if (accept_status)
				printk("APIC delivery error (%lx).\n", accept_status);

			if( !(send_status || accept_status) )
			{
				for(timeout=0;timeout<50000;timeout++)
				{
					if(cpu_callin_map[0]&(1<<i))
						break;
					udelay(100);
				}
				if(cpu_callin_map[0]&(1<<i))
				{
					cpucount++;
					cpu_number_map[i] = cpucount;
				}
				else
				{
					if(*((volatile unsigned char *)8192)==0xA5)
						printk("Stuck ??\n");
					else
						printk("Not responding.\n");
				}
			}

			*((volatile unsigned long *)8192) = 0;
		}

		if (cpu_number_map[i] == -1)
			cpu_present_map &= ~(1 << i);
	}

	cfg = pg0[0];
	pg0[0] = 3;
	local_invalidate();

	CMOS_WRITE(0, 0xf);

	*((volatile long *) 0x467) = 0;

	pg0[0] = cfg;
	local_invalidate();

	if(cpucount==0)
	{
		printk("Error: only one processor found.\n");
		cpu_present_map=(1<<smp_processor_id());
	}
	else
	{
		unsigned long bogosum=0;
		for(i=0;i<32;i++)
		{
			if(cpu_present_map&(1<<i))
				bogosum+=cpu_data[i].udelay_val;
		}
		printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
			cpucount+1,
			(bogosum+2500)/500000,
			((bogosum+2500)/5000)%100);
		smp_activated=1;
		smp_num_cpus=cpucount+1;
	}
}

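/*
 *	Send a cross-CPU message (IPI) to one processor, to all others, or
 *	to everyone, and optionally wait for it to be accepted.
 */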
void smp_message_pass(int target, int msg, unsigned long data, int wait)
{
	unsigned long cfg;
	unsigned long target_map;
	int p=smp_processor_id();
	int irq=0x2d;
	int ct=0;
	static volatile int message_cpu = NO_PROC_ID;

	if(!smp_activated || !smp_commenced)
		return;

	if(msg==MSG_RESCHEDULE)
	{
		irq=0x30;
		if(smp_cpu_in_msg[p])
			return;
	}

	if(message_cpu!=NO_PROC_ID && msg!=MSG_STOP_CPU)
	{
		panic("CPU #%d: Message pass %d but pass in progress by %d of %d\n",
			smp_processor_id(),msg,message_cpu, smp_msg_id);
	}
	message_cpu=smp_processor_id();

	smp_cpu_in_msg[p]++;

	if(msg!=MSG_RESCHEDULE)
	{
		smp_src_cpu=p;
		smp_msg_id=msg;
		smp_msg_data=data;
	}

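	/*
	 *	Wait for the APIC to finish delivering any previous IPI
	 *	(the delivery status bit in the ICR goes clear).
	 */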
	while(ct<1000)
	{
		cfg=apic_read(APIC_ICR);
		if(!(cfg&(1<<12)))
			break;
		ct++;
		udelay(10);
	}

	if(ct==1000)
		printk("CPU #%d: previous IPI still not cleared after 10ms\n", smp_processor_id());

	cfg=apic_read(APIC_ICR2);
	cfg&=0x00FFFFFF;
	apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(target));
	cfg=apic_read(APIC_ICR);
	cfg&=~0xFDFFF;
	cfg|=APIC_DEST_FIELD|APIC_DEST_DM_FIXED|irq;

	if(target==MSG_ALL_BUT_SELF)
	{
		cfg|=APIC_DEST_ALLBUT;
		target_map=cpu_present_map;
		cpu_callin_map[0]=(1<<smp_src_cpu);
	}
	else if(target==MSG_ALL)
	{
		cfg|=APIC_DEST_ALLINC;
		target_map=cpu_present_map;
		cpu_callin_map[0]=0;
	}
	else
	{
		target_map=(1<<target);
		cpu_callin_map[0]=0;
	}

	apic_write(APIC_ICR, cfg);

	switch(wait)
	{
		case 1:
			while(cpu_callin_map[0]!=target_map);
			break;
		case 2:
			while(smp_invalidate_needed);
			break;
	}

	smp_cpu_in_msg[p]--;
	message_cpu=NO_PROC_ID;
}

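/*
 *	Flush the TLB on all processors: mark every other present CPU as
 *	needing an invalidate, send the IPI and flush locally.
 */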
void smp_invalidate(void)
{
	unsigned long flags;
	if(smp_activated && smp_processor_id()!=active_kernel_processor)
		panic("CPU #%d: Attempted invalidate IPI when not AKP(=%d)\n",smp_processor_id(),active_kernel_processor);
	smp_invalidate_needed=cpu_present_map&~(1<<smp_processor_id());

	save_flags(flags);
	cli();
	smp_message_pass(MSG_ALL_BUT_SELF, MSG_INVALIDATE_TLB, 0L, 2);

	local_invalidate();

	restore_flags(flags);
}

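/*
 *	Reschedule interrupt: the active kernel processor has been asked to
 *	run the scheduler; set need_resched and acknowledge the APIC.
 */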
void smp_reschedule_irq(int cpl, struct pt_regs *regs)
{
#ifdef DEBUGGING_SMP_RESCHED
	static int ct=0;
	if(ct==0)
	{
		printk("Beginning scheduling on CPU#%d\n",smp_processor_id());
		ct=1;
	}
#endif
	if(smp_processor_id()!=active_kernel_processor)
		panic("SMP Reschedule on CPU #%d, but #%d is active.\n",
			smp_processor_id(), active_kernel_processor);

	need_resched=1;

	apic_read(APIC_SPIV);
	apic_write(APIC_EOI, 0);
}

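/*
 *	Cross-CPU message interrupt handler: dispatch on smp_msg_id and
 *	acknowledge the local APIC when done.
 */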
void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs)
{
	int i=smp_processor_id();

	switch(smp_msg_id)
	{
		case 0:
			return;

		case MSG_INVALIDATE_TLB:
			if(clear_bit(i,(unsigned long *)&smp_invalidate_needed))
				local_invalidate();
			set_bit(i, (unsigned long *)&cpu_callin_map[0]);
			cpu_callin_map[0]|=1<<smp_processor_id();
			break;

		case MSG_STOP_CPU:
			while(1)
			{
				if(cpu_data[smp_processor_id()].hlt_works_ok)
					__asm__("hlt");
			}
		default:
			printk("CPU #%d sent invalid cross CPU message to CPU #%d: %X(%lX).\n",
				smp_src_cpu,smp_processor_id(),smp_msg_id,smp_msg_data);
			break;
	}

	apic_read(APIC_SPIV);
	apic_write(APIC_EOI, 0);
}