This source file includes the following definitions.
- mpf_checksum
- mpc_family
- smp_read_mpc
- smp_scan_config
- install_trampoline
- smp_alloc_memory
- get_kernel_stack
- smp_store_cpu_info
- smp_commence
- smp_callin
- smp_boot_cpus
- smp_message_pass
- smp_invalidate
- smp_reschedule_irq
- smp_message_irq
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22 #include <linux/kernel.h>
23 #include <linux/string.h>
24 #include <linux/config.h>
25 #include <linux/timer.h>
26 #include <linux/sched.h>
27 #include <linux/mm.h>
28 #include <linux/kernel_stat.h>
29 #include <linux/delay.h>
30 #include <linux/mc146818rtc.h>
31 #include <asm/i82489.h>
32 #include <linux/smp.h>
33 #include <asm/pgtable.h>
34 #include <asm/bitops.h>
35 #include <asm/pgtable.h>
36 #include <asm/smp.h>
37
/* Set non-zero once a valid MP floating pointer structure is found. */
static int smp_found_config=0;

unsigned long cpu_present_map = 0;		/* Bitmask of APIC ids of detected CPUs */
int smp_num_cpus;				/* Total CPUs activated at boot */
int smp_threads_ready=0;			/* Set when the APs may be scheduled */
volatile unsigned long cpu_number_map[NR_CPUS];	/* APIC id -> logical CPU number (-1 = unused) */
volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};	/* [0]: bit per CPU that has checked in */
volatile unsigned long smp_invalidate_needed;	/* Bit per CPU that still owes a TLB flush */
struct cpuinfo_x86 cpu_data[NR_CPUS];		/* Per-CPU identification data */
static unsigned int num_processors = 1;		/* CPU count from the MP tables */
static unsigned long io_apic_addr = 0;		/* Physical address of the (single) I/O APIC */
unsigned char boot_cpu_id = 0;			/* APIC id of the bootstrap processor */
static unsigned char *kstack_base,*kstack_end;	/* Low-memory pool for AP kernel stacks */
static int smp_activated = 0;			/* Non-zero once more than one CPU booted */
static volatile int smp_commenced=0;		/* Released by smp_commence(); APs spin on it */
unsigned long apic_addr=0xFEE00000;		/* Local APIC physical base */
unsigned long nlong = 0;			/* Backing store for the dummy apic_reg below */
/* Dummy mapping so that reading APIC_ID (offset 0x20) hits &nlong until
   smp_boot_cpus() remaps apic_reg onto the real local APIC. */
unsigned char *apic_reg=((unsigned char *)(&nlong))-0x20;
unsigned long apic_retval;			/* Scratch used by the APIC access macros */
unsigned char *kernel_stacks[NR_CPUS];		/* Per-CPU kernel stack pages */

/* Cross-CPU message state: one non-reschedule message in flight at a time. */
static volatile unsigned char smp_cpu_in_msg[NR_CPUS];	/* Per-CPU "currently sending" count */
static volatile unsigned long smp_msg_data;	/* Payload of the message in flight */
static volatile int smp_src_cpu;		/* Sender of the message in flight */
static volatile int smp_msg_id;			/* Type of the message in flight */

/* Big kernel lock state: only one CPU runs in the kernel at a time. */
volatile unsigned long kernel_flag=0;
volatile unsigned char active_kernel_processor = NO_PROC_ID;
volatile unsigned long kernel_counter=0;
volatile unsigned long syscall_count=0;

volatile unsigned long ipi_count;		/* Statistic: IPIs received */
#ifdef __SMP_PROF__
/* SMP profiling counters (spin counts, idle bookkeeping). */
volatile unsigned long smp_spins[NR_CPUS]={0};
volatile unsigned long smp_spins_syscall[NR_CPUS]={0};
volatile unsigned long smp_spins_syscall_cur[NR_CPUS]={0};
volatile unsigned long smp_spins_sys_idle[NR_CPUS]={0};
volatile unsigned long smp_idle_count[1+NR_CPUS]={0,};
#endif
#if defined (__SMP_PROF__)
volatile unsigned long smp_idle_map=0;
#endif

/* Debug printing: compiled out entirely unless SMP_DEBUG is defined.
   Note the double-parenthesis calling convention: SMP_PRINTK(("..",a)). */
#ifdef SMP_DEBUG
#define SMP_PRINTK(x) printk x
#else
#define SMP_PRINTK(x)
#endif
88
89
90
91
92
93
/*
 * Sum the 'len' bytes at 'mp' modulo 256.  A valid MP floating
 * pointer / configuration table sums to zero over its whole length,
 * so any non-zero return means the structure is corrupt.
 */
static int mpf_checksum(unsigned char *mp, int len)
{
	int total = 0;
	int i;

	for (i = 0; i < len; i++)
		total += mp[i];
	return total & 0xFF;
}
101
102
103
104
105
/*
 * Map an MP-table CPU family/model pair to a printable name.
 * Known 486 models come from a fixed table; family 5 is any
 * Pentium; family 0xF model 0xF is the spec's "special controller"
 * marker.  Anything else is formatted into a static buffer (the
 * returned pointer is only valid until the next unknown lookup).
 */
static char *mpc_family(int family,int model)
{
	static char n[32];
	static char *model_defs[]=
	{
		"80486DX","80486DX",
		"80486SX","80486DX/2 or 80487",
		"80486SL","Intel5X2(tm)",
		"Unknown","Unknown",
		"80486DX/4"
	};

	switch (family)
	{
		case 0x05:
			return("Pentium(tm)");
		case 0x0F:
			if (model==0x0F)
				return("Special controller");
			break;
		case 0x04:
			if (model<9)
				return model_defs[model];
			break;
	}
	sprintf(n,"Unknown CPU [%d:%d]",family, model);
	return n;
}
126
127
128
129
130
131 static int smp_read_mpc(struct mp_config_table *mpc)
132 {
133 char str[16];
134 int count=sizeof(*mpc);
135 int apics=0;
136 unsigned char *mpt=((unsigned char *)mpc)+count;
137
138 if(memcmp(mpc->mpc_signature,MPC_SIGNATURE,4))
139 {
140 printk("Bad signature [%c%c%c%c].\n",
141 mpc->mpc_signature[0],
142 mpc->mpc_signature[1],
143 mpc->mpc_signature[2],
144 mpc->mpc_signature[3]);
145 return 1;
146 }
147 if(mpf_checksum((unsigned char *)mpc,mpc->mpc_length))
148 {
149 printk("Checksum error.\n");
150 return 1;
151 }
152 if(mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04)
153 {
154 printk("Bad Config Table version (%d)!!\n",mpc->mpc_spec);
155 return 1;
156 }
157 memcpy(str,mpc->mpc_oem,8);
158 str[8]=0;
159 printk("OEM ID: %s ",str);
160 memcpy(str,mpc->mpc_productid,12);
161 str[12]=0;
162 printk("Product ID: %s ",str);
163 printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
164
165
166 apic_addr = mpc->mpc_lapic;
167
168
169
170
171
172 while(count<mpc->mpc_length)
173 {
174 switch(*mpt)
175 {
176 case MP_PROCESSOR:
177 {
178 struct mpc_config_processor *m=
179 (struct mpc_config_processor *)mpt;
180 if(m->mpc_cpuflag&CPU_ENABLED)
181 {
182 printk("Processor #%d %s APIC version %d\n",
183 m->mpc_apicid,
184 mpc_family((m->mpc_cpufeature&
185 CPU_FAMILY_MASK)>>8,
186 (m->mpc_cpufeature&
187 CPU_MODEL_MASK)>>4),
188 m->mpc_apicver);
189 #ifdef SMP_DEBUG
190 if(m->mpc_featureflag&(1<<0))
191 printk(" Floating point unit present.\n");
192 if(m->mpc_featureflag&(1<<7))
193 printk(" Machine Exception supported.\n");
194 if(m->mpc_featureflag&(1<<8))
195 printk(" 64 bit compare & exchange supported.\n");
196 if(m->mpc_featureflag&(1<<9))
197 printk(" Internal APIC present.\n");
198 #endif
199 if(m->mpc_cpuflag&CPU_BOOTPROCESSOR)
200 {
201 SMP_PRINTK((" Bootup CPU\n"));
202 boot_cpu_id=m->mpc_apicid;
203 nlong = boot_cpu_id<<24;
204 }
205 else
206 num_processors++;
207
208 if(m->mpc_apicid>NR_CPUS)
209 printk("Processor #%d unused. (Max %d processors).\n",m->mpc_apicid, NR_CPUS);
210 else
211 cpu_present_map|=(1<<m->mpc_apicid);
212 }
213 mpt+=sizeof(*m);
214 count+=sizeof(*m);
215 break;
216 }
217 case MP_BUS:
218 {
219 struct mpc_config_bus *m=
220 (struct mpc_config_bus *)mpt;
221 memcpy(str,m->mpc_bustype,6);
222 str[6]=0;
223 SMP_PRINTK(("Bus #%d is %s\n",
224 m->mpc_busid,
225 str));
226 mpt+=sizeof(*m);
227 count+=sizeof(*m);
228 break;
229 }
230 case MP_IOAPIC:
231 {
232 struct mpc_config_ioapic *m=
233 (struct mpc_config_ioapic *)mpt;
234 if(m->mpc_flags&MPC_APIC_USABLE)
235 {
236 apics++;
237 printk("I/O APIC #%d Version %d at 0x%lX.\n",
238 m->mpc_apicid,m->mpc_apicver,
239 m->mpc_apicaddr);
240 io_apic_addr = m->mpc_apicaddr;
241 }
242 mpt+=sizeof(*m);
243 count+=sizeof(*m);
244 break;
245 }
246 case MP_INTSRC:
247 {
248 struct mpc_config_intsrc *m=
249 (struct mpc_config_intsrc *)mpt;
250
251 mpt+=sizeof(*m);
252 count+=sizeof(*m);
253 break;
254 }
255 case MP_LINTSRC:
256 {
257 struct mpc_config_intlocal *m=
258 (struct mpc_config_intlocal *)mpt;
259 mpt+=sizeof(*m);
260 count+=sizeof(*m);
261 break;
262 }
263 }
264 }
265 if(apics>1)
266 printk("Warning: Multiple APIC's not supported.\n");
267 return num_processors;
268 }
269
270
271
272
273
/*
 * Scan 'length' bytes from physical address 'base' for the Intel MP
 * floating pointer structure and, if found, parse the configuration
 * it describes.  Sets smp_found_config, num_processors, apic/ioapic
 * addresses and cpu_present_map as side effects.
 */
void smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned long *bp=(unsigned long *)base;
	struct intel_mp_floating *mpf;

	SMP_PRINTK(("Scan SMP from %p for %ld bytes.\n",
		bp,length));
	/* Sanity check: the structure is defined to be exactly 16 bytes. */
	if(sizeof(*mpf)!=16)
		printk("Error: MPF size\n");

	while(length>0)
	{
		if(*bp==SMP_MAGIC_IDENT)
		{
			mpf=(struct intel_mp_floating *)bp;
			/* Valid if the length field says one 16-byte paragraph,
			   the 16 bytes checksum to zero, and the spec revision
			   is 1.1 or 1.4. */
			if(mpf->mpf_length==1 &&
				!mpf_checksum((unsigned char *)bp,16) &&
				(mpf->mpf_specification == 1
				 || mpf->mpf_specification == 4) )
			{
				printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
				if(mpf->mpf_feature2&(1<<7))
					printk(" IMCR and PIC compatibility mode.\n");
				else
					printk(" Virtual Wire compatibility mode.\n");
				smp_found_config=1;
				/* A non-zero feature1 byte selects one of the
				   spec's default 2-CPU configurations (no config
				   table present). */
				if(mpf->mpf_feature1!=0)
				{
					num_processors=2;
					printk("I/O APIC at 0xFEC00000.\n");
					printk("Bus#0 is ");
				}
				switch(mpf->mpf_feature1)
				{
					case 1:
						printk("ISA");
						break;
					case 2:
						printk("EISA with no IRQ8 chaining");
						break;
					case 3:
						printk("EISA");
						break;
					case 4:
						printk("MCA");
						break;
					case 5:
						printk("ISA\nBus#1 is PCI");
						break;
					case 6:
						printk("EISA\nBus #1 is PCI");
						break;
					case 7:
						printk("MCA\nBus #1 is PCI");
						break;
					case 0:
						break;
					default:
						printk("???\nUnknown standard configuration %d\n",
							mpf->mpf_feature1);
						return;
				}
				/* A physical pointer means a full config table
				   exists; otherwise assume the default mapping:
				   CPUs with APIC ids 0 and 1 present. */
				if(mpf->mpf_physptr)
					smp_read_mpc((void *)mpf->mpf_physptr);
				else
					cpu_present_map=3;
				printk("Processors: %d\n", num_processors);
			}
		}
		/* Advance one 16-byte paragraph (bp is unsigned long *,
		   4 bytes each on i386). */
		bp+=4;
		length-=16;
	}
}
354
355
356
357
358
/* Startup trampoline code, assembled externally into trampoline.hex;
   copied onto each AP's stack page by install_trampoline(). */
static unsigned char trampoline_data[]={
#include "trampoline.hex"
};
362
363
364
365
366
367
368
369 static void install_trampoline(unsigned char *mp)
370 {
371 memcpy(mp,trampoline_data,sizeof(trampoline_data));
372 }
373
374
375
376
377
378
379
380
381 unsigned long smp_alloc_memory(unsigned long mem_base)
382 {
383 int size=(num_processors-1)*PAGE_SIZE;
384
385
386
387
388
389 if(mem_base+size>=0x9F000)
390 panic("smp_alloc_memory: Insufficient low memory for kernel stacks.\n");
391 kstack_base=(void *)mem_base;
392 mem_base+=size;
393 kstack_end=(void *)mem_base;
394 return mem_base;
395 }
396
397
398
399
400
401 static void *get_kernel_stack(void)
402 {
403 void *stack=kstack_base;
404 if(kstack_base>=kstack_end)
405 return NULL;
406 kstack_base+=PAGE_SIZE;
407 return stack;
408 }
409
410
411
412
413
414
415
/*
 * Record CPU identification for processor 'id' in cpu_data[].
 * Copies the global detection variables (x86, x86_model, ...) that
 * describe the CPU this code is currently running on, so 'id'
 * should be the caller's own processor id.
 */
void smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c=&cpu_data[id];

	c->hard_math=hard_math;
	c->x86=x86;
	c->x86_model=x86_model;
	c->x86_mask=x86_mask;
	c->x86_capability=x86_capability;
	c->fdiv_bug=fdiv_bug;
	c->wp_works_ok=wp_works_ok;
	c->hlt_works_ok=hlt_works_ok;
	/* BogoMIPS value from calibrate_delay() on this CPU. */
	c->udelay_val=loops_per_sec;
	strcpy(c->x86_vendor_id, x86_vendor_id);
}
430
431
432
433
434
435
436
437
438
439
440
/*
 * Release the application processors: smp_callin() spins on
 * smp_commenced, so setting it lets every waiting AP proceed.
 */
void smp_commence(void)
{
	smp_commenced=1;
}
448
/*
 * Startup path of an application processor (entered from the
 * trampoline).  Enables its local APIC, calibrates the delay loop,
 * records its CPU data, reports in via cpu_callin_map, then spins
 * until smp_commence() releases it and loads its TSS.
 */
void smp_callin(void)
{
	int cpuid=GET_APIC_ID(apic_read(APIC_ID));
	unsigned long l;

	SMP_PRINTK(("CALLIN %d\n",smp_processor_id()));
	/* Software-enable the local APIC: bit 8 of the spurious
	   interrupt vector register. */
	l=apic_read(APIC_SPIV);
	l|=(1<<8);
	apic_write(APIC_SPIV,l);
	sti();
	/* Get our BogoMIPS. */
	calibrate_delay();
	/* Save our processor parameters. */
	smp_store_cpu_info(cpuid);
	/* Tell the boot CPU we have arrived. */
	set_bit(cpuid, (unsigned long *)&cpu_callin_map[0]);
	/* No LDT for kernel mode. */
	load_ldt(0);
	local_invalidate();
	/* Wait for the boot CPU to release us. */
	while(!smp_commenced);
	/* The boot CPU marks failed processors with -1 in
	   cpu_number_map; such a CPU must never proceed. */
	if (cpu_number_map[cpuid] == -1)
		while(1);
	local_invalidate();
	SMP_PRINTK(("Commenced..\n"));

	load_TR(cpu_number_map[cpuid]);
}
490
491
492
493
494
/*
 * Boot the application processors.  Runs once on the bootstrap CPU:
 * maps the local APIC, then for every other CPU in cpu_present_map
 * allocates a trampoline/stack page, programs the warm-reset vector
 * and runs the INIT -> STARTUP -> STARTUP IPI sequence, waiting for
 * each AP to check in.  Finally sums BogoMIPS and marks SMP active.
 */
void smp_boot_cpus(void)
{
	int i=0;
	int cpucount=0;
	void *stack;
	extern unsigned long init_user_stack[];

	/* Map the real local APIC into kernel space, replacing the
	   dummy apic_reg that pointed at nlong.  Only done when more
	   than CPU 0 is present. */
	if (1<cpu_present_map)
		apic_reg = vremap(0xFEE00000,4096);

	if(apic_reg == NULL)
		panic("Unable to map local apic.\n");

#ifdef SMP_DEBUG
	{
		/* Dump a few APIC registers to verify the mapping. */
		int reg;

		reg = apic_read(APIC_VERSION);
		printk("Getting VERSION: %x\n", reg);

		apic_write(APIC_VERSION, 0);
		reg = apic_read(APIC_VERSION);
		printk("Getting VERSION: %x\n", reg);

		reg = apic_read(APIC_LVT0);
		printk("Getting LVT0: %x\n", reg);

		reg = apic_read(APIC_LVT1);
		printk("Getting LVT1: %x\n", reg);
	}
#endif

	/* The boot CPU keeps the stack it booted on. */
	kernel_stacks[boot_cpu_id]=(void *)init_user_stack;

	smp_store_cpu_info(boot_cpu_id);

	active_kernel_processor=boot_cpu_id;

	SMP_PRINTK(("CPU map: %lx\n", cpu_present_map));

	for(i=0;i<NR_CPUS;i++)
	{
		if((cpu_present_map&(1<<i)) && i!=boot_cpu_id)
		{
			unsigned long cfg, send_status, accept_status;
			int timeout;

			/* Each processor needs its own kernel stack page,
			   which also holds the startup trampoline. */
			stack=get_kernel_stack();
			if(stack==NULL)
				panic("No memory for processor stacks.\n");
			kernel_stacks[i]=stack;
			install_trampoline(stack);

			printk("Booting processor %d stack %p: ",i,stack);

			/* Software-enable our local APIC (SPIV bit 8)
			   before sending IPIs. */
			cfg=apic_read(APIC_SPIV);
			cfg|=(1<<8);
			apic_write(APIC_SPIV,cfg);

			SMP_PRINTK(("Setting warm reset code and vector.\n"));

			/* Warm-reset path: write shutdown code 0xA to CMOS
			   register 0xF and point the 40:67 reset vector at
			   the trampoline page.  pg0[0] is temporarily made
			   present+writable so the BIOS data area at 0x467
			   can be written, then restored read-only. */
			CMOS_WRITE(0xa, 0xf);
			pg0[0]=7;
			*((volatile unsigned short *) 0x467) = ((unsigned long)stack)>>4;
			*((volatile unsigned short *) 0x469) = 0;
			pg0[0]= pte_val(mk_pte(0, PAGE_READONLY));

			/* Clear any stale APIC errors. */
			apic_write(APIC_ESR, 0);
			accept_status = (apic_read(APIC_ESR) & 0xEF);

			/* Delivery/accept status of the IPI sequence. */
			send_status = 0;
			accept_status = 0;

			SMP_PRINTK(("Asserting INIT.\n"));

			/* Send an INIT assert (0xC500) to APIC id i:
			   destination in ICR2 bits 24-31, command in ICR. */
			cfg=apic_read(APIC_ICR2);
			cfg&=0x00FFFFFF;
			apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));
			cfg=apic_read(APIC_ICR);
			cfg&=~0xCDFFF;
			cfg|=0x0000c500;
			apic_write(APIC_ICR, cfg);

			/* Wait up to ~1s for the delivery-status bit (12)
			   to clear. */
			timeout = 0;
			do {
				udelay(1000);
				if ((send_status = (!(apic_read(APIC_ICR) & 0x00001000))))
					break;
			} while (timeout++ < 1000);

#ifdef EEK2
			if (send_status) {
				apic_write(APIC_ESR, 0);
				accept_status = (apic_read(APIC_ESR) & 0xEF);
			}
#endif

			if (send_status && !accept_status)
			{
				SMP_PRINTK(("Deasserting INIT.\n"));

				/* INIT de-assert (0x8500), same destination. */
				cfg=apic_read(APIC_ICR2);
				cfg&=0x00FFFFFF;
				apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));
				cfg=apic_read(APIC_ICR);
				cfg&=~0xCDFFF;
				cfg|=0x00008500;
				apic_write(APIC_ICR, cfg);

				timeout = 0;
				do {
					udelay(1000);
					if ((send_status = !(apic_read(APIC_ICR) & 0x00001000) ))
						break;
				} while (timeout++ < 1000);

				if (send_status) {
					udelay(1000000);
					apic_write(APIC_ESR, 0);
					accept_status = (apic_read(APIC_ESR) & 0xEF);
				}
			}

			if (send_status && !accept_status)
			{
				SMP_PRINTK(("Sending first STARTUP.\n"));

				/* STARTUP IPI: the vector field carries the
				   trampoline page number (stack >> 12). */
				cfg=apic_read(APIC_ICR2);
				cfg&=0x00FFFFFF;
				apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));
				cfg=apic_read(APIC_ICR);
				cfg&=~0xCDFFF ;
				cfg|=APIC_DEST_FIELD|APIC_DEST_DM_STARTUP|(((unsigned long)stack)>>12);
				apic_write(APIC_ICR, cfg);

				timeout = 0;
				do {
					udelay(1000);
					if ((send_status = !(apic_read(APIC_ICR) & 0x00001000)) )
						break;
				} while (timeout++ < 1000);

				if (send_status) {
					udelay(1000000);
					apic_write(APIC_ESR, 0);
					accept_status = (apic_read(APIC_ESR) & 0xEF);
				}
			}

			if (send_status && !accept_status)
			{
				/* Repeat the STARTUP IPI a second time. */
				SMP_PRINTK(("Sending second STARTUP.\n"));

				cfg=apic_read(APIC_ICR2);
				cfg&=0x00FFFFFF;
				apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));
				cfg=apic_read(APIC_ICR);
				cfg&=~0xCDFFF ;
				cfg|=APIC_DEST_FIELD|APIC_DEST_DM_STARTUP|(((unsigned long)stack)>>12);
				apic_write(APIC_ICR, cfg);

				timeout = 0;
				do {
					udelay(1000);
					if ((send_status = !(apic_read(APIC_ICR) & 0x00001000)))
						break;
				} while (timeout++ < 1000);

				if (send_status) {
					udelay(1000000);
					apic_write(APIC_ESR, 0);
					accept_status = (apic_read(APIC_ESR) & 0xEF);
				}
			}

			if (!send_status)
				printk("APIC never delivered???\n");
			else if (accept_status)
				printk("APIC delivery error (%lx).\n", accept_status);
			/* NOTE(review): the block below runs regardless of
			   the two error printks above - it is not guarded
			   by them. */
			{
				/* Wait up to ~5s for the AP to set its bit
				   in cpu_callin_map[0] from smp_callin(). */
				for(timeout=0;timeout<50000;timeout++)
				{
					if(cpu_callin_map[0]&(1<<i))
						break;
					udelay(100);
				}
				if(cpu_callin_map[0]&(1<<i))
				{
					cpucount++;
					/* Logical CPU numbers for APs start at 1. */
					cpu_number_map[i] = cpucount;
				}
				else
				{
					/* Marker byte at 8192: 0xA5 presumably
					   means the trampoline ran but the AP
					   never called in - TODO confirm. */
					if(*((volatile unsigned char *)8192)==0xA5)
						printk("Stuck ??\n");
					else
						printk("Not responding val=(%lx).\n", *((unsigned long *) stack));
					cpu_present_map&=~(1<<i);
					cpu_number_map[i] = -1;
				}
			}

			/* Clear the marker for the next CPU's attempt. */
			*((volatile unsigned long *)8192) = 0;
		}
		else if (i == boot_cpu_id)
		{
			cpu_number_map[i] = 0;
		}
		else
		{
			cpu_number_map[i] = -1;
		}

	}

	/* Summarise the result. */
	if(cpucount==0)
	{
		printk("Error: only one processor found.\n");
		cpu_present_map=(1<<smp_processor_id());
	}
	else
	{
		unsigned long bogosum=0;
		for(i=0;i<32;i++)
		{
			if(cpu_present_map&(1<<i))
				bogosum+=cpu_data[i].udelay_val;
		}
		printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
			cpucount+1,
			(bogosum+2500)/500000,
			((bogosum+2500)/5000)%100);
		smp_activated=1;
		smp_num_cpus=cpucount+1;
	}
}
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
/*
 * Send a cross-CPU message as an APIC IPI.
 *
 * target: an APIC id, or MSG_ALL / MSG_ALL_BUT_SELF.
 * msg:    message type.  MSG_RESCHEDULE uses vector 0x30 and may be
 *         silently dropped; all other messages use vector 0x2d and
 *         occupy the single global smp_src_cpu/smp_msg_id/
 *         smp_msg_data slot.
 * data:   payload stored in smp_msg_data (non-reschedule only).
 * wait:   1 = spin until every target sets its cpu_callin_map bit,
 *         2 = spin until smp_invalidate_needed drains,
 *         anything else = return once the IPI is programmed.
 */
void smp_message_pass(int target, int msg, unsigned long data, int wait)
{
	unsigned long cfg;
	unsigned long target_map;
	int p=smp_processor_id();
	int irq=0x2d;		/* Default cross-CPU message vector */
	int ct=0;
	static volatile int message_cpu = NO_PROC_ID;

	/* No messaging before the other CPUs are actually running. */
	if(!smp_activated || !smp_commenced)
		return;

	/* Reschedule rides its own vector and is lossy: if this CPU
	   is already busy sending, just drop it. */
	if(msg==MSG_RESCHEDULE)
	{
		irq=0x30;
		if(smp_cpu_in_msg[p])
			return;
	}

	/* Only one message may be in flight at a time; MSG_STOP_CPU
	   is exempt from the check. */
	if(message_cpu!=NO_PROC_ID && msg!=MSG_STOP_CPU)
	{
		panic("CPU #%d: Message pass %d but pass in progress by %d of %d\n",
			smp_processor_id(),msg,message_cpu, smp_msg_id);
	}
	message_cpu=smp_processor_id();

	/* We are busy sending. */
	smp_cpu_in_msg[p]++;

	/* Reschedule needs no payload, so don't clobber the global
	   message slot for it. */
	if(msg!=MSG_RESCHEDULE)
	{
		smp_src_cpu=p;
		smp_msg_id=msg;
		smp_msg_data=data;
	}

	/* Wait up to 10ms (1000 * 10us) for any previous IPI to
	   finish sending (ICR delivery-status bit 12). */
	while(ct<1000)
	{
		cfg=apic_read(APIC_ICR);
		if(!(cfg&(1<<12)))
			break;
		ct++;
		udelay(10);
	}

	if(ct==1000)
		printk("CPU #%d: previous IPI still not cleared after 10mS", smp_processor_id());

	/* Program destination (ICR2) and a fixed-delivery IPI on the
	   chosen vector (ICR). */
	cfg=apic_read(APIC_ICR2);
	cfg&=0x00FFFFFF;
	apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(target));
	cfg=apic_read(APIC_ICR);
	cfg&=~0xFDFFF;
	cfg|=APIC_DEST_FIELD|APIC_DEST_DM_FIXED|irq;

	/* Pick the destination shorthand and prime cpu_callin_map[0]
	   so the wait loops below know what "everyone answered"
	   looks like. */
	if(target==MSG_ALL_BUT_SELF)
	{
		cfg|=APIC_DEST_ALLBUT;
		target_map=cpu_present_map;
		cpu_callin_map[0]=(1<<smp_src_cpu);
	}
	else if(target==MSG_ALL)
	{
		cfg|=APIC_DEST_ALLINC;
		target_map=cpu_present_map;
		cpu_callin_map[0]=0;
	}
	else
	{
		target_map=(1<<target);
		cpu_callin_map[0]=0;
	}

	/* Fire the IPI. */
	apic_write(APIC_ICR, cfg);

	/* Spin for the requested acknowledgement. */
	switch(wait)
	{
		case 1:
			while(cpu_callin_map[0]!=target_map);
			break;
		case 2:
			while(smp_invalidate_needed);
			break;
	}

	/* Done: release the message slot. */
	smp_cpu_in_msg[p]--;
	message_cpu=NO_PROC_ID;
}
952
953
954
955
956
957
958
/*
 * Flush the TLB on every processor.  May only be called by the CPU
 * currently holding the kernel (the AKP): it marks every other
 * present CPU in smp_invalidate_needed, IPIs them with
 * MSG_INVALIDATE_TLB, waits (wait mode 2) for the bits to drain in
 * smp_message_irq(), and flushes its own TLB as well.
 */
void smp_invalidate(void)
{
	unsigned long flags;

	if(smp_activated && smp_processor_id()!=active_kernel_processor)
		panic("CPU #%d:Attempted invalidate IPI when not AKP(=%d)\n",smp_processor_id(),active_kernel_processor);

	/* Every other present CPU owes a flush. */
	smp_invalidate_needed=cpu_present_map&~(1<<smp_processor_id());

	/* Send the IPI with interrupts disabled and wait until
	   smp_invalidate_needed clears. */
	save_flags(flags);
	cli();
	smp_message_pass(MSG_ALL_BUT_SELF, MSG_INVALIDATE_TLB, 0L, 2);

	/* Flush the local TLB too. */
	local_invalidate();

	restore_flags(flags);
}
998
999
1000
1001
1002
/*
 * Reschedule IPI handler (vector 0x30).  Performs timer-tick-style
 * accounting for the current task (utime/stime, interval timers,
 * RLIMIT_CPU enforcement, optional profiling), forces a reschedule
 * when the quantum is exhausted, then ACKs the local APIC.
 */
void smp_reschedule_irq(int cpl, struct pt_regs *regs)
{
#ifdef DEBUGGING_SMP_RESCHED
	static int ct=0;
	if(ct==0)
	{
		printk("Beginning scheduling on CPU#%d\n",smp_processor_id());
		ct=1;
	}
#endif
	/* Only the CPU currently holding the kernel may be here. */
	if(smp_processor_id()!=active_kernel_processor)
		panic("SMP Reschedule on CPU #%d, but #%d is active.\n",
			smp_processor_id(), active_kernel_processor);

	/* Charge the tick to user or system time. */
	if (user_mode(regs))
	{
		current->utime++;
		if (current->pid)
		{
			/* Low priority counts as "nice" time. */
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* The virtual interval timer ticks only in user mode. */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current->pid)
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* Sample the kernel PC into the profiling buffer. */
		if (prof_buffer && current->pid) {
			extern int _stext;
			unsigned long eip = regs->eip - (unsigned long) &_stext;
			eip >>= CONFIG_PROFILE_SHIFT;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}

	/* Enforce CPU time limits: SIGKILL at the hard limit; SIGXCPU
	   when the soft limit is reached (checked once per second) and
	   every five seconds thereafter. */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
		(((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
		(((current->stime + current->utime) % HZ) == 0)) {
		unsigned long psecs = (current->stime + current->utime) / HZ;

		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);

		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
			((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}

	/* The profiling interval timer ticks in both modes. */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}

	/* Quantum exhausted, or this is the idle task (pid 0):
	   request a reschedule. */
	if ( 0 > --current->counter || current->pid == 0)
	{
		current->counter = 0;
		need_resched=1;
	}

	/* Acknowledge the interrupt to the local APIC.  The dummy
	   SPIV read before the EOI write is presumably a hardware
	   workaround - TODO confirm. */
	apic_read(APIC_SPIV);
	apic_write(APIC_EOI, 0);
}
1099
1100
1101
1102
1103
/*
 * Generic cross-CPU message handler (vector 0x2d).  Dispatches on
 * the global smp_msg_id filled in by smp_message_pass(), then ACKs
 * the local APIC.
 */
void smp_message_irq(int cpl, struct pt_regs *regs)
{
	int i=smp_processor_id();

	switch(smp_msg_id)
	{
		/* NOTE(review): a zero message id returns without the
		   APIC EOI below - presumably treated as spurious;
		   confirm this is intentional. */
		case 0:
			return;

		/* Flush our TLB and clear our bit in
		   smp_invalidate_needed so the sender can stop waiting;
		   also check in via cpu_callin_map. */
		case MSG_INVALIDATE_TLB:
			if(clear_bit(i,(unsigned long *)&smp_invalidate_needed))
				local_invalidate();
			set_bit(i, (unsigned long *)&cpu_callin_map[0]);
			/* Redundant with the set_bit above (same bit). */
			cpu_callin_map[0]|=1<<smp_processor_id();
			break;

		/* Halt this CPU forever (busy-spin if HLT is broken). */
		case MSG_STOP_CPU:
			while(1)
			{
				if(cpu_data[smp_processor_id()].hlt_works_ok)
					__asm__("hlt");
			}
		default:
			printk("CPU #%d sent invalid cross CPU message to CPU #%d: %X(%lX).\n",
				smp_src_cpu,smp_processor_id(),smp_msg_id,smp_msg_data);
			break;
	}

	/* Acknowledge the interrupt to the local APIC. */
	apic_read(APIC_SPIV);
	apic_write(APIC_EOI, 0);
}