This source file includes following definitions.
- mpf_checksum
- mpc_family
- smp_read_mpc
- smp_scan_config
- install_trampoline
- smp_alloc_memory
- get_kernel_stack
- smp_store_cpu_info
- smp_commence
- smp_callin
- smp_boot_cpus
- smp_message_pass
- smp_invalidate
- smp_reschedule_irq
- smp_message_irq
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24 #include <linux/kernel.h>
25 #include <linux/string.h>
26 #include <linux/config.h>
27 #include <linux/timer.h>
28 #include <linux/sched.h>
29 #include <linux/mm.h>
30 #include <linux/kernel_stat.h>
31 #include <linux/delay.h>
32 #include <linux/mc146818rtc.h>
33 #include <asm/i82489.h>
34 #include <linux/smp.h>
35 #include <asm/pgtable.h>
36 #include <asm/bitops.h>
37 #include <asm/pgtable.h>
38 #include <asm/smp.h>
39
int smp_found_config=0;				/* Set once an MP floating pointer was found */

unsigned long cpu_present_map = 0;		/* Bitmask of usable CPUs, indexed by APIC ID */
int smp_num_cpus;				/* Total number of activated CPUs */
int smp_threads_ready=0;			/* NOTE(review): not used in this file -- set elsewhere? */
volatile int cpu_number_map[NR_CPUS];		/* APIC ID -> logical CPU number, -1 if absent */
volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};	/* [0]: bitmask of CPUs that checked in / ACKed a message */
volatile unsigned long smp_invalidate_needed;	/* Bitmask of CPUs still owing a TLB flush */
struct cpuinfo_x86 cpu_data[NR_CPUS];		/* Per-CPU identification data */
static unsigned int num_processors = 1;		/* Processors counted from the MP table */
static unsigned long io_apic_addr = 0;		/* Physical address of the I/O APIC (from MP table) */
unsigned char boot_cpu_id = 0;			/* APIC ID of the boot processor */
static unsigned char *kstack_base,*kstack_end;	/* Pool of stack pages for secondary CPUs */
static int smp_activated = 0;			/* Nonzero once more than one CPU came up */
int apic_version[NR_CPUS];			/* Local APIC version per CPU (from MP table) */
static volatile int smp_commenced=0;		/* Boot CPU releases the spinning APs with this */
unsigned long apic_addr=0xFEE00000;		/* Physical local APIC address (MP table may override) */
unsigned long nlong = 0;			/* Holds boot_cpu_id<<24: fakes the APIC ID register before mapping */
unsigned char *apic_reg=((unsigned char *)(&nlong))-0x20;	/* APIC base; points near nlong until vremap() in smp_boot_cpus() */
unsigned long apic_retval;			/* NOTE(review): not referenced in this file */
unsigned char *kernel_stacks[NR_CPUS];		/* Kernel stack page of each CPU */

/* State of the cross-CPU message currently in flight. */
static volatile unsigned char smp_cpu_in_msg[NR_CPUS];	/* Nonzero while that CPU is passing a message */
static volatile unsigned long smp_msg_data;	/* Message payload */
static volatile int smp_src_cpu;		/* Sending CPU */
static volatile int smp_msg_id;			/* Message identifier */

volatile unsigned long kernel_flag=0;		/* NOTE(review): appears to be the kernel lock word used by entry code -- confirm */
volatile unsigned char active_kernel_processor = NO_PROC_ID;	/* CPU currently executing in the kernel */
volatile unsigned long kernel_counter=0;	/* NOTE(review): maintained by entry code, not this file */
volatile unsigned long syscall_count=0;		/* NOTE(review): maintained by entry code, not this file */

volatile unsigned long ipi_count;		/* Statistics: IPIs handled */
#ifdef __SMP_PROF__
/* SMP profiling counters (consumed outside this file). */
volatile unsigned long smp_spins[NR_CPUS]={0};
volatile unsigned long smp_spins_syscall[NR_CPUS]={0};
volatile unsigned long smp_spins_syscall_cur[NR_CPUS]={0};
volatile unsigned long smp_spins_sys_idle[NR_CPUS]={0};
volatile unsigned long smp_idle_count[1+NR_CPUS]={0,};
#endif
#if defined (__SMP_PROF__)
volatile unsigned long smp_idle_map=0;		/* Bitmask of currently idle CPUs */
#endif



/* Debug printks compile away entirely unless SMP_DEBUG is defined. */
#ifdef SMP_DEBUG
#define SMP_PRINTK(x) printk x
#else
#define SMP_PRINTK(x)
#endif
91
92
93
94
95
96
/*
 * Sum 'len' bytes starting at 'mp' and return the low 8 bits.
 * MP spec structures are valid when all their bytes sum to
 * zero modulo 256, i.e. when this returns 0.
 */
static int mpf_checksum(unsigned char *mp, int len)
{
	int total = 0;
	int i;

	for (i = 0; i < len; i++)
		total += mp[i];
	return total & 0xFF;
}
104
105
106
107
108
/*
 * Map an MP table CPU family/model pair onto a human readable name.
 *
 * Returns a string constant, or a pointer into a static scratch
 * buffer for unrecognised CPUs (not re-entrant; boot time use only).
 */
static char *mpc_family(int family,int model)
{
	/*
	 * Worst case "Unknown CPU [%d:%d]" needs 13 + 11 + 1 + 11 + 1 + NUL
	 * bytes with 32bit ints; the old 32 byte buffer could overflow on
	 * bogus family/model values, so give it headroom.
	 */
	static char n[48];
	static char *model_defs[]=
	{
		"80486DX","80486DX",
		"80486SX","80486DX/2 or 80487",
		"80486SL","Intel5X2(tm)",
		"Unknown","Unknown",
		"80486DX/4"
	};
	if(family==0x6)
		return("Pentium(tm) Pro");
	if(family==0x5)
		return("Pentium(tm)");
	if(family==0x0F && model==0x0F)
		return("Special controller");
	if(family==0x04 && model<9)	/* model_defs[] covers models 0..8 */
		return model_defs[model];
	sprintf(n,"Unknown CPU [%d:%d]",family, model);
	return n;
}
131
132
133
134
135
136 static int smp_read_mpc(struct mp_config_table *mpc)
137 {
138 char str[16];
139 int count=sizeof(*mpc);
140 int apics=0;
141 unsigned char *mpt=((unsigned char *)mpc)+count;
142
143 if(memcmp(mpc->mpc_signature,MPC_SIGNATURE,4))
144 {
145 printk("Bad signature [%c%c%c%c].\n",
146 mpc->mpc_signature[0],
147 mpc->mpc_signature[1],
148 mpc->mpc_signature[2],
149 mpc->mpc_signature[3]);
150 return 1;
151 }
152 if(mpf_checksum((unsigned char *)mpc,mpc->mpc_length))
153 {
154 printk("Checksum error.\n");
155 return 1;
156 }
157 if(mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04)
158 {
159 printk("Bad Config Table version (%d)!!\n",mpc->mpc_spec);
160 return 1;
161 }
162 memcpy(str,mpc->mpc_oem,8);
163 str[8]=0;
164 printk("OEM ID: %s ",str);
165 memcpy(str,mpc->mpc_productid,12);
166 str[12]=0;
167 printk("Product ID: %s ",str);
168 printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
169
170
171 apic_addr = mpc->mpc_lapic;
172
173
174
175
176
177 while(count<mpc->mpc_length)
178 {
179 switch(*mpt)
180 {
181 case MP_PROCESSOR:
182 {
183 struct mpc_config_processor *m=
184 (struct mpc_config_processor *)mpt;
185 if(m->mpc_cpuflag&CPU_ENABLED)
186 {
187 printk("Processor #%d %s APIC version %d\n",
188 m->mpc_apicid,
189 mpc_family((m->mpc_cpufeature&
190 CPU_FAMILY_MASK)>>8,
191 (m->mpc_cpufeature&
192 CPU_MODEL_MASK)>>4),
193 m->mpc_apicver);
194 #ifdef SMP_DEBUG
195 if(m->mpc_featureflag&(1<<0))
196 printk(" Floating point unit present.\n");
197 if(m->mpc_featureflag&(1<<7))
198 printk(" Machine Exception supported.\n");
199 if(m->mpc_featureflag&(1<<8))
200 printk(" 64 bit compare & exchange supported.\n");
201 if(m->mpc_featureflag&(1<<9))
202 printk(" Internal APIC present.\n");
203 #endif
204 if(m->mpc_cpuflag&CPU_BOOTPROCESSOR)
205 {
206 SMP_PRINTK((" Bootup CPU\n"));
207 boot_cpu_id=m->mpc_apicid;
208 nlong = boot_cpu_id<<24;
209 }
210 else
211 num_processors++;
212
213 if(m->mpc_apicid>NR_CPUS)
214 printk("Processor #%d unused. (Max %d processors).\n",m->mpc_apicid, NR_CPUS);
215 else
216 {
217 cpu_present_map|=(1<<m->mpc_apicid);
218 apic_version[m->mpc_apicid]=m->mpc_apicver;
219 }
220 }
221 mpt+=sizeof(*m);
222 count+=sizeof(*m);
223 break;
224 }
225 case MP_BUS:
226 {
227 struct mpc_config_bus *m=
228 (struct mpc_config_bus *)mpt;
229 memcpy(str,m->mpc_bustype,6);
230 str[6]=0;
231 SMP_PRINTK(("Bus #%d is %s\n",
232 m->mpc_busid,
233 str));
234 mpt+=sizeof(*m);
235 count+=sizeof(*m);
236 break;
237 }
238 case MP_IOAPIC:
239 {
240 struct mpc_config_ioapic *m=
241 (struct mpc_config_ioapic *)mpt;
242 if(m->mpc_flags&MPC_APIC_USABLE)
243 {
244 apics++;
245 printk("I/O APIC #%d Version %d at 0x%lX.\n",
246 m->mpc_apicid,m->mpc_apicver,
247 m->mpc_apicaddr);
248 io_apic_addr = m->mpc_apicaddr;
249 }
250 mpt+=sizeof(*m);
251 count+=sizeof(*m);
252 break;
253 }
254 case MP_INTSRC:
255 {
256 struct mpc_config_intsrc *m=
257 (struct mpc_config_intsrc *)mpt;
258
259 mpt+=sizeof(*m);
260 count+=sizeof(*m);
261 break;
262 }
263 case MP_LINTSRC:
264 {
265 struct mpc_config_intlocal *m=
266 (struct mpc_config_intlocal *)mpt;
267 mpt+=sizeof(*m);
268 count+=sizeof(*m);
269 break;
270 }
271 }
272 }
273 if(apics>1)
274 printk("Warning: Multiple APIC's not supported.\n");
275 return num_processors;
276 }
277
278
279
280
281
/*
 * Scan a region of physical memory (identity mapped at boot) for the
 * Intel MP floating pointer structure, which lies on a 16 byte
 * boundary.  On a valid hit, report the configuration and either
 * parse the full MP config table or apply one of the "default
 * configuration" shortcuts encoded in feature byte 1.
 */
void smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned long *bp=(unsigned long *)base;
	struct intel_mp_floating *mpf;

	SMP_PRINTK(("Scan SMP from %p for %ld bytes.\n",
		bp,length));
	/* Sanity check the structure packing: the spec says 16 bytes. */
	if(sizeof(*mpf)!=16)
		printk("Error: MPF size\n");

	while(length>0)
	{
		/* The "_MP_" signature marks a candidate. */
		if(*bp==SMP_MAGIC_IDENT)
		{
			mpf=(struct intel_mp_floating *)bp;
			/* Must be one paragraph long, checksum to zero and be a
			   spec revision we understand (1.1 or 1.4). */
			if(mpf->mpf_length==1 &&
				!mpf_checksum((unsigned char *)bp,16) &&
				(mpf->mpf_specification == 1
				 || mpf->mpf_specification == 4) )
			{
				printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
				if(mpf->mpf_feature2&(1<<7))
					printk(" IMCR and PIC compatibility mode.\n");
				else
					printk(" Virtual Wire compatibility mode.\n");
				smp_found_config=1;

				/*
				 * A nonzero feature byte 1 selects one of the MP spec
				 * "default configurations": exactly two CPUs, an I/O
				 * APIC at the standard address, and no config table.
				 */
				if(mpf->mpf_feature1!=0)
				{
					num_processors=2;
					printk("I/O APIC at 0xFEC00000.\n");
					printk("Bus#0 is ");
				}
				switch(mpf->mpf_feature1)
				{
					case 1:
						printk("ISA");
						break;
					case 2:
						printk("EISA with no IRQ8 chaining");
						break;
					case 3:
						printk("EISA");
						break;
					case 4:
						printk("MCA");
						break;
					case 5:
						printk("ISA\nBus#1 is PCI");
						break;
					case 6:
						printk("EISA\nBus #1 is PCI");
						break;
					case 7:
						printk("MCA\nBus #1 is PCI");
						break;
					case 0:
						break;
					default:
						printk("???\nUnknown standard configuration %d\n",
							mpf->mpf_feature1);
						return;
				}

				/* Parse the full table if one exists, otherwise assume
				   the default two CPU setup (APIC IDs 0 and 1). */
				if(mpf->mpf_physptr)
					smp_read_mpc((void *)mpf->mpf_physptr);
				else
					cpu_present_map=3;
				printk("Processors: %d\n", num_processors);

				/* Only the first valid structure is used. */
				return;
			}
		}
		/* bp is a long *, so +=4 steps one 16 byte paragraph
		   (assumes 32bit longs). */
		bp+=4;
		length-=16;
	}
}
366
367
368
369
370
/* 16bit real mode bootstrap code executed by a freshly started
   secondary CPU; assembled externally and included as hex bytes. */
static unsigned char trampoline_data[]={
#include "trampoline.hex"
};
374
375
376
377
378
379
380
381 static void install_trampoline(unsigned char *mp)
382 {
383 memcpy(mp,trampoline_data,sizeof(trampoline_data));
384 }
385
386
387
388
389
390
391
392
393 unsigned long smp_alloc_memory(unsigned long mem_base)
394 {
395 int size=(num_processors-1)*PAGE_SIZE;
396
397
398
399
400
401 if(mem_base+size>=0x9F000)
402 panic("smp_alloc_memory: Insufficient low memory for kernel stacks.\n");
403 kstack_base=(void *)mem_base;
404 mem_base+=size;
405 kstack_end=(void *)mem_base;
406 return mem_base;
407 }
408
409
410
411
412
413 static void *get_kernel_stack(void)
414 {
415 void *stack=kstack_base;
416 if(kstack_base>=kstack_end)
417 return NULL;
418 kstack_base+=PAGE_SIZE;
419 return stack;
420 }
421
422
423
424
425
426
427
428 void smp_store_cpu_info(int id)
429 {
430 struct cpuinfo_x86 *c=&cpu_data[id];
431 c->hard_math=hard_math;
432 c->x86=x86;
433 c->x86_model=x86_model;
434 c->x86_mask=x86_mask;
435 c->x86_capability=x86_capability;
436 c->fdiv_bug=fdiv_bug;
437 c->wp_works_ok=wp_works_ok;
438 c->hlt_works_ok=hlt_works_ok;
439 c->udelay_val=loops_per_sec;
440 strcpy(c->x86_vendor_id, x86_vendor_id);
441 }
442
443
444
445
446
447
448
449
450
451
452
453 void smp_commence(void)
454 {
455
456
457
458 smp_commenced=1;
459 }
460
/*
 * Entry point for a freshly started secondary CPU (reached via the
 * trampoline).  Enables its local APIC, calibrates its delay loop,
 * records its cpuinfo, signals the boot CPU through cpu_callin_map
 * and then spins until smp_commence() releases it.
 */
void smp_callin(void)
{
	int cpuid=GET_APIC_ID(apic_read(APIC_ID));
	unsigned long l;

	SMP_PRINTK(("CALLIN %d\n",smp_processor_id()));
	/* Enable our local APIC: bit 8 of the spurious vector register. */
	l=apic_read(APIC_SPIV);
	l|=(1<<8);
	apic_write(APIC_SPIV,l);
	sti();

	/* Work out this CPU's BogoMIPS so udelay() works here too. */
	calibrate_delay();

	/* Record our identification data. */
	smp_store_cpu_info(cpuid);

	/* Tell the boot CPU we made it. */
	set_bit(cpuid, (unsigned long *)&cpu_callin_map[0]);

	/* No per-process LDT on this CPU yet. */
	load_ldt(0);

	local_invalidate();
	/* Wait for smp_commence(); a CPU the boot processor gave up on
	   (cpu_number_map entry still -1) parks itself forever. */
	while(!smp_commenced);
	if (cpu_number_map[cpuid] == -1)
		while(1);
	local_invalidate();
	SMP_PRINTK(("Commenced..\n"));

	/* Load the task register with our per-CPU TSS slot. */
	load_TR(cpu_number_map[cpuid]);

}
502
503
504
505
506
/*
 * Bring up the application processors.  Maps the local APIC, then for
 * each present CPU: installs the trampoline on a fresh low-memory
 * stack page, points the BIOS warm reset vector at it, kicks the CPU
 * with INIT (and, for integrated APICs, STARTUP) IPIs, and waits for
 * it to announce itself in cpu_callin_map.  Finally restores the warm
 * reset vector and reports the combined BogoMIPS.
 */
void smp_boot_cpus(void)
{
	int i,j;
	int cpucount=0;
	unsigned long cfg;
	void *stack;
	extern unsigned long init_user_stack[];

	/* No CPU has a logical number yet. */
	for (i = 0; i < NR_CPUS; i++)
		cpu_number_map[i] = -1;

	/* The boot CPU keeps the stack it booted on. */
	kernel_stacks[boot_cpu_id]=(void *)init_user_stack;

	smp_store_cpu_info(boot_cpu_id);

	/* The boot CPU is logical CPU 0 and currently owns the kernel. */
	cpu_present_map |= (1 << smp_processor_id());
	cpu_number_map[boot_cpu_id] = 0;
	active_kernel_processor=boot_cpu_id;

	/* Without an MP table we stay uniprocessor. */
	if (!smp_found_config)
		return;

	/* Map the local APIC registers into kernel virtual space. */
	apic_reg = vremap(apic_addr,4096);

	if(apic_reg == NULL)
		panic("Unable to map local apic.\n");

#ifdef SMP_DEBUG
	{
		int reg;

		/* Poke a few APIC registers so their state appears in the
		   log (VERSION is read only, the write should be ignored). */
		reg = apic_read(APIC_VERSION);
		SMP_PRINTK(("Getting VERSION: %x\n", reg));

		apic_write(APIC_VERSION, 0);
		reg = apic_read(APIC_VERSION);
		SMP_PRINTK(("Getting VERSION: %x\n", reg));

		reg = apic_read(APIC_LVT0);
		SMP_PRINTK(("Getting LVT0: %x\n", reg));

		reg = apic_read(APIC_LVT1);
		SMP_PRINTK(("Getting LVT1: %x\n", reg));
	}
#endif

	/* Enable the boot CPU's local APIC. */
	cfg=apic_read(APIC_SPIV);
	cfg|=(1<<8);
	apic_write(APIC_SPIV,cfg);

	udelay(10);

	/* Now kick every other present CPU in turn. */
	SMP_PRINTK(("CPU map: %lx\n", cpu_present_map));

	for(i=0;i<NR_CPUS;i++)
	{
		/* Never try to restart the boot processor. */
		if (i == boot_cpu_id)
			continue;

		if (cpu_present_map & (1 << i))
		{
			unsigned long send_status, accept_status;
			int timeout, num_starts;

			/* Each AP gets its own stack page with the real mode
			   trampoline copied to its base. */
			stack=get_kernel_stack();
			if(stack==NULL)
				panic("No memory for processor stacks.\n");
			kernel_stacks[i]=stack;
			install_trampoline(stack);

			printk("Booting processor %d stack %p: ",i,stack);

			/* Arrange a BIOS warm reset into the trampoline: CMOS
			   register 0xF = 0xA selects the warm reset path, and
			   the segment:offset vector at 40:67 (0x467/0x469)
			   points at the trampoline.  Page 0 is briefly remapped
			   writable (entry value 7) to poke low memory. */
			SMP_PRINTK(("Setting warm reset code and vector.\n"));

			cfg=pg0[0];

			CMOS_WRITE(0xa, 0xf);
			pg0[0]=7;
			local_invalidate();
			*((volatile unsigned short *) 0x469) = ((unsigned long)stack)>>4;
			*((volatile unsigned short *) 0x467) = 0;

			/* Restore the original page 0 mapping. */
			pg0[0]= cfg;
			local_invalidate();

			/* Clear any stale APIC errors before sending IPIs. */
			apic_write(APIC_ESR, 0);
			accept_status = (apic_read(APIC_ESR) & 0xEF);

			send_status = 0;
			accept_status = 0;

			/* Assert INIT on the target CPU... */
			SMP_PRINTK(("Asserting INIT.\n"));

			cfg=apic_read(APIC_ICR2);
			cfg&=0x00FFFFFF;
			apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));
			cfg=apic_read(APIC_ICR);
			cfg&=~0xCDFFF;
			cfg |= (APIC_DEST_FIELD | APIC_DEST_LEVELTRIG
				| APIC_DEST_ASSERT | APIC_DEST_DM_INIT);
			apic_write(APIC_ICR, cfg);

			udelay(200);
			/* ...then deassert it. */
			SMP_PRINTK(("Deasserting INIT.\n"));

			cfg=apic_read(APIC_ICR2);
			cfg&=0x00FFFFFF;
			apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));
			cfg=apic_read(APIC_ICR);
			cfg&=~0xCDFFF;
			cfg |= (APIC_DEST_FIELD | APIC_DEST_LEVELTRIG
				| APIC_DEST_DM_INIT);
			apic_write(APIC_ICR, cfg);

			/* A nonzero version high nibble marks an integrated
			   APIC, which needs STARTUP IPIs; older external
			   APICs boot from the INIT alone. */
			if ( apic_version[i] & 0xF0 )
				num_starts = 2;
			else
				num_starts = 0;

			/* Send the STARTUP IPIs, stopping on any error. */
			for (j = 0; !(send_status || accept_status)
				&& (j < num_starts) ; j++)
			{
				SMP_PRINTK(("Sending STARTUP #%d.\n",j));

				apic_write(APIC_ESR, 0);

				/* The STARTUP vector is the trampoline's page
				   number (stack address >> 12). */
				cfg=apic_read(APIC_ICR2);
				cfg&=0x00FFFFFF;
				apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));
				cfg=apic_read(APIC_ICR);
				cfg&=~0xCDFFF;
				cfg |= (APIC_DEST_FIELD
					| APIC_DEST_DM_STARTUP
					| (((int) stack) >> 12) );
				apic_write(APIC_ICR, cfg);

				/* Wait up to 10ms for the delivery status bit
				   (ICR bit 12) to clear. */
				timeout = 0;
				do {
					udelay(10);
				} while ( (send_status = (apic_read(APIC_ICR) & 0x1000))
					&& (timeout++ < 1000));
				udelay(200);

				accept_status = (apic_read(APIC_ESR) & 0xEF);
			}

			if (send_status)
				printk("APIC never delivered???\n");
			if (accept_status)
				printk("APIC delivery error (%lx).\n", accept_status);

			if( !(send_status || accept_status) )
			{
				/* Give the CPU up to ~5 seconds to call in. */
				for(timeout=0;timeout<50000;timeout++)
				{
					if(cpu_callin_map[0]&(1<<i))
						break;
					udelay(100);
				}
				if(cpu_callin_map[0]&(1<<i))
				{
					cpucount++;
					/* Number CPUs in call-in order after the BP. */
					cpu_number_map[i] = cpucount;
				}
				else
				{
					/* 0xA5 at physical 8192 -- presumably stored by
					   the trampoline on entry (see trampoline.hex) --
					   distinguishes "started but stuck" from
					   "never ran". */
					if(*((volatile unsigned char *)8192)==0xA5)
						printk("Stuck ??\n");
					else
						printk("Not responding.\n");
				}
			}

			/* Reset the trampoline-entered marker for the next CPU. */
			*((volatile unsigned long *)8192) = 0;
		}

		/* Forget CPUs that failed to come up. */
		if (cpu_number_map[i] == -1)
			cpu_present_map &= ~(1 << i);
	}

	/* Undo the warm reset setup: map page 0 (read-only, entry
	   value 3) so low memory can be restored... */
	cfg = pg0[0];
	pg0[0] = 3;
	local_invalidate();

	/* ...put CMOS back to a normal boot and clear the 40:67
	   reset vector... */
	CMOS_WRITE(0, 0xf);

	*((volatile long *) 0x467) = 0;

	/* ...and restore the original page 0 mapping. */
	pg0[0] = cfg;
	local_invalidate();

	/* Summarize what came up. */
	if(cpucount==0)
	{
		printk("Error: only one processor found.\n");
		cpu_present_map=(1<<smp_processor_id());
	}
	else
	{
		unsigned long bogosum=0;
		for(i=0;i<32;i++)
		{
			if(cpu_present_map&(1<<i))
				bogosum+=cpu_data[i].udelay_val;
		}
		printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
			cpucount+1,
			(bogosum+2500)/500000,
			((bogosum+2500)/5000)%100);
		smp_activated=1;
		smp_num_cpus=cpucount+1;
	}
}
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
/*
 * Send a cross-CPU message as an APIC IPI.
 *
 *	target	CPU number, or MSG_ALL / MSG_ALL_BUT_SELF
 *	msg	message id; MSG_RESCHEDULE uses its own vector (0x30),
 *		everything else vector 0x2d
 *	data	payload published in smp_msg_data for the receivers
 *	wait	0 = fire and forget; 1 = spin until all targets have
 *		ACKed via cpu_callin_map; 2 = spin until the TLB
 *		invalidate bitmask has drained
 */
void smp_message_pass(int target, int msg, unsigned long data, int wait)
{
	unsigned long cfg;
	unsigned long target_map;
	int p=smp_processor_id();
	int irq=0x2d;		/* default cross-CPU message vector */
	int ct=0;
	static volatile int message_cpu = NO_PROC_ID;

	/* IPIs are pointless until all CPUs are up and released. */
	if(!smp_activated || !smp_commenced)
		return;

	/* Reschedule requests are best effort on their own vector:
	   silently dropped if this CPU is already mid-message. */
	if(msg==MSG_RESCHEDULE)
	{
		irq=0x30;
		if(smp_cpu_in_msg[p])
			return;
	}

	/* Only one message may be in flight at a time; MSG_STOP_CPU is
	   the exception since it may interrupt anything. */
	if(message_cpu!=NO_PROC_ID && msg!=MSG_STOP_CPU)
	{
		panic("CPU #%d: Message pass %d but pass in progress by %d of %d\n",
			smp_processor_id(),msg,message_cpu, smp_msg_id);
	}
	message_cpu=smp_processor_id();

	/* Mark this CPU as busy sending. */
	smp_cpu_in_msg[p]++;

	/* Reschedule carries no data; everything else publishes its
	   id and payload for smp_message_irq() to read. */
	if(msg!=MSG_RESCHEDULE)
	{
		smp_src_cpu=p;
		smp_msg_id=msg;
		smp_msg_data=data;
	}

	/* Wait (up to 10ms) for any previous IPI to finish sending:
	   ICR bit 12 is the delivery status bit. */
	while(ct<1000)
	{
		cfg=apic_read(APIC_ICR);
		if(!(cfg&(1<<12)))
			break;
		ct++;
		udelay(10);
	}

	/* Carry on regardless after the timeout -- nothing better to do. */
	if(ct==1000)
		printk("CPU #%d: previous IPI still not cleared after 10mS", smp_processor_id());

	/* Program the destination field and the command word. */
	cfg=apic_read(APIC_ICR2);
	cfg&=0x00FFFFFF;
	apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(target));
	cfg=apic_read(APIC_ICR);
	cfg&=~0xFDFFF;
	cfg|=APIC_DEST_FIELD|APIC_DEST_DM_FIXED|irq;

	/* Work out which CPUs must acknowledge and reset the ACK map;
	   for ALL_BUT_SELF the sender's own bit is pre-set. */
	if(target==MSG_ALL_BUT_SELF)
	{
		cfg|=APIC_DEST_ALLBUT;
		target_map=cpu_present_map;
		cpu_callin_map[0]=(1<<smp_src_cpu);
	}
	else if(target==MSG_ALL)
	{
		cfg|=APIC_DEST_ALLINC;
		target_map=cpu_present_map;
		cpu_callin_map[0]=0;
	}
	else
	{
		target_map=(1<<target);
		cpu_callin_map[0]=0;
	}

	/* Writing the ICR low word sends the IPI. */
	apic_write(APIC_ICR, cfg);

	/* Spin until the receivers report completion, as requested. */
	switch(wait)
	{
		case 1:
			while(cpu_callin_map[0]!=target_map);
			break;
		case 2:
			while(smp_invalidate_needed);
			break;
	}

	/* Done: free the message channel. */
	smp_cpu_in_msg[p]--;
	message_cpu=NO_PROC_ID;
}
1002
1003
1004
1005
1006
1007
1008
/*
 * Flush the TLB on every CPU.  Marks all other present CPUs as
 * needing a flush, sends MSG_INVALIDATE_TLB and spins (wait mode 2)
 * until they have all cleared their bit, then flushes locally.
 * Only the active kernel processor may call this once SMP is up.
 */
void smp_invalidate(void)
{
	unsigned long flags;
	if(smp_activated && smp_processor_id()!=active_kernel_processor)
		panic("CPU #%d:Attempted invalidate IPI when not AKP(=%d)\n",smp_processor_id(),active_kernel_processor);

	/* Every other present CPU owes us a TLB flush. */
	smp_invalidate_needed=cpu_present_map&~(1<<smp_processor_id());

	/* Interrupts stay off across the message pass -- NOTE(review):
	   presumably so no nested message can race the shared message
	   state while we spin; confirm against smp_message_pass(). */
	save_flags(flags);
	cli();
	smp_message_pass(MSG_ALL_BUT_SELF, MSG_INVALIDATE_TLB, 0L, 2);

	/* Flush our own TLB as well. */
	local_invalidate();

	restore_flags(flags);
}
1048
1049
1050
1051
1052
/*
 * Timer-tick style IPI handler (vector 0x30, MSG_RESCHEDULE).
 * Accounts one tick of user/system time against the current task,
 * delivers interval timer signals, enforces RLIMIT_CPU, ages the
 * task's time slice, and finally ACKs the local APIC.
 */
void smp_reschedule_irq(int cpl, struct pt_regs *regs)
{
#ifdef DEBUGGING_SMP_RESCHED
	static int ct=0;
	if(ct==0)
	{
		printk("Beginning scheduling on CPU#%d\n",smp_processor_id());
		ct=1;
	}
#endif
	if(smp_processor_id()!=active_kernel_processor)
		panic("SMP Reschedule on CPU #%d, but #%d is active.\n",
			smp_processor_id(), active_kernel_processor);

	/* Charge the tick as user or system time (pid 0 = idle task
	   is excluded from the global statistics). */
	if (user_mode(regs))
	{
		current->utime++;
		if (current->pid)
		{
			/* Low priority values are counted as "nice" time. */
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}

		/* Deliver an expiring virtual interval timer. */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current->pid)
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* Kernel PC histogram for the profiler. */
		if (prof_buffer && current->pid) {
			extern int _stext;
			unsigned long eip = regs->eip - (unsigned long) &_stext;
			eip >>= CONFIG_PROFILE_SHIFT;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}

	/* Enforce the CPU time limits: SIGKILL at the hard limit,
	   SIGXCPU at the soft limit and every 5 seconds beyond it. */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
	    (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
	    (((current->stime + current->utime) % HZ) == 0)) {
		unsigned long psecs = (current->stime + current->utime) / HZ;
		/* Send SIGXCPU the moment the soft limit is reached... */
		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);
		/* ...and then once every five seconds past it. */
		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
		    ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}

	/* Deliver an expiring profiling interval timer. */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}

	/* Time slice exhausted (or running the idle task): request a
	   reschedule on the way out. */
	if ( 0 > --current->counter || current->pid == 0)
	{
		current->counter = 0;
		need_resched=1;
	}

	/* ACK the local APIC: dummy read, then write the EOI register. */
	apic_read(APIC_SPIV);
	apic_write(APIC_EOI, 0);
}
1149
1150
1151
1152
1153
1154 void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs)
1155 {
1156 int i=smp_processor_id();
1157
1158
1159
1160 switch(smp_msg_id)
1161 {
1162 case 0:
1163 return;
1164
1165
1166
1167
1168
1169 case MSG_INVALIDATE_TLB:
1170 if(clear_bit(i,(unsigned long *)&smp_invalidate_needed))
1171 local_invalidate();
1172 set_bit(i, (unsigned long *)&cpu_callin_map[0]);
1173 cpu_callin_map[0]|=1<<smp_processor_id();
1174 break;
1175
1176
1177
1178
1179 case MSG_STOP_CPU:
1180 while(1)
1181 {
1182 if(cpu_data[smp_processor_id()].hlt_works_ok)
1183 __asm__("hlt");
1184 }
1185 default:
1186 printk("CPU #%d sent invalid cross CPU message to CPU #%d: %X(%lX).\n",
1187 smp_src_cpu,smp_processor_id(),smp_msg_id,smp_msg_data);
1188 break;
1189 }
1190
1191
1192
1193
1194 apic_read(APIC_SPIV);
1195 apic_write(APIC_EOI, 0);
1196 }