This source file includes the following definitions:
- mpf_checksum
- mpc_family
- smp_read_mpc
- smp_scan_config
- install_trampoline
- smp_alloc_memory
- get_kernel_stack
- smp_store_cpu_info
- smp_commence
- smp_callin
- smp_boot_cpus
- smp_message_pass
- smp_invalidate
- smp_reschedule_irq
- smp_message_irq
24 #include <linux/kernel.h>
25 #include <linux/string.h>
26 #include <linux/config.h>
27 #include <linux/timer.h>
28 #include <linux/sched.h>
29 #include <linux/mm.h>
30 #include <linux/kernel_stat.h>
31 #include <linux/delay.h>
32 #include <linux/mc146818rtc.h>
33 #include <asm/i82489.h>
34 #include <linux/smp.h>
35 #include <asm/pgtable.h>
36 #include <asm/bitops.h>
37
38 #include <asm/smp.h>
39
40 int smp_found_config=0;
41
42 unsigned long cpu_present_map = 0;
43 int smp_num_cpus;
44 int smp_threads_ready=0;
45 volatile int cpu_number_map[NR_CPUS];
46 volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
47 volatile unsigned long smp_invalidate_needed;
48 struct cpuinfo_x86 cpu_data[NR_CPUS];
49 static unsigned int num_processors = 1;
50 static unsigned long io_apic_addr = 0;
51 unsigned char boot_cpu_id = 0;
52 static unsigned char *kstack_base,*kstack_end;
53 static int smp_activated = 0;
54 int apic_version[NR_CPUS];
55 static volatile int smp_commenced=0;
56 unsigned long apic_addr=0xFEE00000;
57 unsigned long nlong = 0;
58 unsigned char *apic_reg=((unsigned char *)(&nlong))-0x20;
59 unsigned long apic_retval;
60 unsigned char *kernel_stacks[NR_CPUS];
61
62 static volatile unsigned char smp_cpu_in_msg[NR_CPUS];
63 static volatile unsigned long smp_msg_data;
64 static volatile int smp_src_cpu;
65 static volatile int smp_msg_id;
66
67 volatile unsigned long kernel_flag=0;
68 volatile unsigned char active_kernel_processor = NO_PROC_ID;
69 volatile unsigned long kernel_counter=0;
70 volatile unsigned long syscall_count=0;
71
72 volatile unsigned long ipi_count;
73 #ifdef __SMP_PROF__
74 volatile unsigned long smp_spins[NR_CPUS]={0};
75 volatile unsigned long smp_spins_syscall[NR_CPUS]={0};
76 volatile unsigned long smp_spins_syscall_cur[NR_CPUS]={0};
77 volatile unsigned long smp_spins_sys_idle[NR_CPUS]={0};
78 volatile unsigned long smp_idle_count[1+NR_CPUS]={0,};
79 #endif
80 #if defined (__SMP_PROF__)
81 volatile unsigned long smp_idle_map=0;
82 #endif
83
84
85
86 #ifdef SMP_DEBUG
87 #define SMP_PRINTK(x) printk x
88 #else
89 #define SMP_PRINTK(x)
90 #endif
91
92
93
94
95
96
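/*
 * Sum the bytes of an MP specification structure.  A valid structure's
 * bytes sum to zero modulo 256, so any non-zero return means corruption.
 */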
97 static int mpf_checksum(unsigned char *mp, int len)
98 {
99 int sum=0;
100 while(len--)
101 sum+=*mp++;
102 return sum&0xFF;
103 }
104
105
106
107
108
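/*
 * Turn the family/model numbers from an MP table processor entry into a
 * printable CPU name; 486 variants are looked up in model_defs[].
 */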
109 static char *mpc_family(int family,int model)
110 {
111 static char n[32];
112 static char *model_defs[]=
113 {
114 "80486DX","80486DX",
115 "80486SX","80486DX/2 or 80487",
116 "80486SL","Intel5X2(tm)",
117 "Unknown","Unknown",
118 "80486DX/4"
119 };
120 if(family==0x6)
121 return("Pentium(tm) Pro");
122 if(family==0x5)
123 return("Pentium(tm)");
124 if(family==0x0F && model==0x0F)
125 return("Special controller");
126 if(family==0x04 && model<9)
127 return model_defs[model];
128 sprintf(n,"Unknown CPU [%d:%d]",family, model);
129 return n;
130 }
131
132
133
134
135
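/*
 * Read the MP configuration table: check its signature, checksum and spec
 * revision, then walk the variable-length entries that follow and record
 * each enabled processor, bus, I/O APIC and interrupt source found.
 * Returns the number of processors.
 */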
136 static int smp_read_mpc(struct mp_config_table *mpc)
137 {
138 char str[16];
139 int count=sizeof(*mpc);
140 int apics=0;
141 unsigned char *mpt=((unsigned char *)mpc)+count;
142
143 if(memcmp(mpc->mpc_signature,MPC_SIGNATURE,4))
144 {
145 printk("Bad signature [%c%c%c%c].\n",
146 mpc->mpc_signature[0],
147 mpc->mpc_signature[1],
148 mpc->mpc_signature[2],
149 mpc->mpc_signature[3]);
150 return 1;
151 }
152 if(mpf_checksum((unsigned char *)mpc,mpc->mpc_length))
153 {
154 printk("Checksum error.\n");
155 return 1;
156 }
157 if(mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04)
158 {
159 printk("Bad Config Table version (%d)!!\n",mpc->mpc_spec);
160 return 1;
161 }
162 memcpy(str,mpc->mpc_oem,8);
163 str[8]=0;
164 printk("OEM ID: %s ",str);
165 memcpy(str,mpc->mpc_productid,12);
166 str[12]=0;
167 printk("Product ID: %s ",str);
168 printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
169
170
171 apic_addr = mpc->mpc_lapic;
172
173
174
175
176
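/* Now process the variable-length entries that follow the fixed header. */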
177 while(count<mpc->mpc_length)
178 {
179 switch(*mpt)
180 {
181 case MP_PROCESSOR:
182 {
183 struct mpc_config_processor *m=
184 (struct mpc_config_processor *)mpt;
185 if(m->mpc_cpuflag&CPU_ENABLED)
186 {
187 printk("Processor #%d %s APIC version %d\n",
188 m->mpc_apicid,
189 mpc_family((m->mpc_cpufeature&
190 CPU_FAMILY_MASK)>>8,
191 (m->mpc_cpufeature&
192 CPU_MODEL_MASK)>>4),
193 m->mpc_apicver);
194 #ifdef SMP_DEBUG
195 if(m->mpc_featureflag&(1<<0))
196 printk(" Floating point unit present.\n");
197 if(m->mpc_featureflag&(1<<7))
198 printk(" Machine Exception supported.\n");
199 if(m->mpc_featureflag&(1<<8))
200 printk(" 64 bit compare & exchange supported.\n");
201 if(m->mpc_featureflag&(1<<9))
202 printk(" Internal APIC present.\n");
203 #endif
204 if(m->mpc_cpuflag&CPU_BOOTPROCESSOR)
205 {
206 SMP_PRINTK((" Bootup CPU\n"));
207 boot_cpu_id=m->mpc_apicid;
208 nlong = boot_cpu_id<<24;
209 }
210 else
211 num_processors++;
212
213 if(m->mpc_apicid>NR_CPUS)
214 printk("Processor #%d unused. (Max %d processors).\n",m->mpc_apicid, NR_CPUS);
215 else
216 {
217 cpu_present_map|=(1<<m->mpc_apicid);
218 apic_version[m->mpc_apicid]=m->mpc_apicver;
219 }
220 }
221 mpt+=sizeof(*m);
222 count+=sizeof(*m);
223 break;
224 }
225 case MP_BUS:
226 {
227 struct mpc_config_bus *m=
228 (struct mpc_config_bus *)mpt;
229 memcpy(str,m->mpc_bustype,6);
230 str[6]=0;
231 SMP_PRINTK(("Bus #%d is %s\n",
232 m->mpc_busid,
233 str));
234 mpt+=sizeof(*m);
235 count+=sizeof(*m);
236 break;
237 }
238 case MP_IOAPIC:
239 {
240 struct mpc_config_ioapic *m=
241 (struct mpc_config_ioapic *)mpt;
242 if(m->mpc_flags&MPC_APIC_USABLE)
243 {
244 apics++;
245 printk("I/O APIC #%d Version %d at 0x%lX.\n",
246 m->mpc_apicid,m->mpc_apicver,
247 m->mpc_apicaddr);
248 io_apic_addr = m->mpc_apicaddr;
249 }
250 mpt+=sizeof(*m);
251 count+=sizeof(*m);
252 break;
253 }
254 case MP_INTSRC:
255 {
256 struct mpc_config_intsrc *m=
257 (struct mpc_config_intsrc *)mpt;
258
259 mpt+=sizeof(*m);
260 count+=sizeof(*m);
261 break;
262 }
263 case MP_LINTSRC:
264 {
265 struct mpc_config_intlocal *m=
266 (struct mpc_config_intlocal *)mpt;
267 mpt+=sizeof(*m);
268 count+=sizeof(*m);
269 break;
270 }
271 }
272 }
273 if(apics>1)
274 printk("Warning: Multiple APIC's not supported.\n");
275 return num_processors;
276 }
277
278
279
280
281
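/*
 * Scan a region of low memory for the 16-byte "_MP_" floating pointer
 * structure that tells us an MP configuration table (and hence SMP
 * hardware) is present.
 */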
282 void smp_scan_config(unsigned long base, unsigned long length)
283 {
284 unsigned long *bp=(unsigned long *)base;
285 struct intel_mp_floating *mpf;
286
287 SMP_PRINTK(("Scan SMP from %p for %ld bytes.\n",
288 bp,length));
289 if(sizeof(*mpf)!=16)
290 printk("Error: MPF size\n");
291
292 while(length>0)
293 {
294 if(*bp==SMP_MAGIC_IDENT)
295 {
296 mpf=(struct intel_mp_floating *)bp;
297 if(mpf->mpf_length==1 &&
298 !mpf_checksum((unsigned char *)bp,16) &&
299 (mpf->mpf_specification == 1
300 || mpf->mpf_specification == 4) )
301 {
302 printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
303 if(mpf->mpf_feature2&(1<<7))
304 printk(" IMCR and PIC compatibility mode.\n");
305 else
306 printk(" Virtual Wire compatibility mode.\n");
307 smp_found_config=1;
308
309
310
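/*
 * A non-zero mpf_feature1 selects one of the MP specification default
 * configurations: exactly two processors, an I/O APIC at 0xFEC00000 and
 * the bus layout decoded below, with no full configuration table to read.
 */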
311 if(mpf->mpf_feature1!=0)
312 {
313 num_processors=2;
314 printk("I/O APIC at 0xFEC00000.\n");
315 printk("Bus#0 is ");
316 }
317 switch(mpf->mpf_feature1)
318 {
319 case 1:
320 printk("ISA");
321 break;
322 case 2:
323 printk("EISA with no IRQ8 chaining");
324 break;
325 case 3:
326 printk("EISA");
327 break;
328 case 4:
329 printk("MCA");
330 break;
331 case 5:
332 printk("ISA\nBus#1 is PCI");
333 break;
334 case 6:
335 printk("EISA\nBus #1 is PCI");
336 break;
337 case 7:
338 printk("MCA\nBus #1 is PCI");
339 break;
340 case 0:
341 break;
342 default:
343 printk("???\nUnknown standard configuration %d\n",
344 mpf->mpf_feature1);
345 return;
346 }
347
348
349
350
351 if(mpf->mpf_physptr)
352 smp_read_mpc((void *)mpf->mpf_physptr);
353 else
354 cpu_present_map=3;
355 printk("Processors: %d\n", num_processors);
356
357
358
359 return;
360 }
361 }
362 bp+=4;
363 length-=16;
364 }
365 }
366
367
368
369
370
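/*
 * Pre-assembled 16-bit startup code that each application processor
 * begins executing in real mode; install_trampoline() copies it onto the
 * CPU's boot page.
 */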
371 static unsigned char trampoline_data[]={
372 #include "trampoline.hex"
373 };
374
375
376
377
378
379
380
381 static void install_trampoline(unsigned char *mp)
382 {
383 memcpy(mp,trampoline_data,sizeof(trampoline_data));
384 }
385
386
387
388
389
390
391
392
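/*
 * Reserve one page of low memory per secondary processor for its
 * trampoline and initial kernel stack; panic if that would run past
 * 0x9F000, the top of usable base memory.
 */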
393 unsigned long smp_alloc_memory(unsigned long mem_base)
394 {
395 int size=(num_processors-1)*PAGE_SIZE;
396
397
398
399
400
401 if(mem_base+size>=0x9F000)
402 panic("smp_alloc_memory: Insufficient low memory for kernel stacks.\n");
403 kstack_base=(void *)mem_base;
404 mem_base+=size;
405 kstack_end=(void *)mem_base;
406 return mem_base;
407 }
408
409
410
411
412
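/*
 * Hand out the next page from the pool reserved by smp_alloc_memory(),
 * or NULL once the pool is exhausted.
 */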
413 static void *get_kernel_stack(void)
414 {
415 void *stack=kstack_base;
416 if(kstack_base>=kstack_end)
417 return NULL;
418 kstack_base+=PAGE_SIZE;
419 return stack;
420 }
421
422
423
424
425
426
427
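/*
 * Copy the CPU identification gathered at boot (family, model,
 * capabilities, bug workarounds, delay-loop calibration) into this
 * processor's cpu_data[] entry.
 */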
428 void smp_store_cpu_info(int id)
429 {
430 struct cpuinfo_x86 *c=&cpu_data[id];
431 c->hard_math=hard_math;
432 c->x86=x86;
433 c->x86_model=x86_model;
434 c->x86_mask=x86_mask;
435 c->x86_capability=x86_capability;
436 c->fdiv_bug=fdiv_bug;
437 c->wp_works_ok=wp_works_ok;
438 c->hlt_works_ok=hlt_works_ok;
439 c->udelay_val=loops_per_sec;
440 strcpy(c->x86_vendor_id, x86_vendor_id);
441 }
442
443
444
445
446
447
448
449
450
451
452
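/*
 * Called once the boot processor has finished setting everything up:
 * setting smp_commenced releases the secondary CPUs spinning in
 * smp_callin().
 */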
453 void smp_commence(void)
454 {
455
456
457
458 smp_commenced=1;
459 }
460
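/*
 * Startup path for an application processor: enable its local APIC,
 * calibrate the delay loop, record its cpuinfo, check in via
 * cpu_callin_map, then spin until smp_commence() lets it run.
 */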
461 void smp_callin(void)
462 {
463 int cpuid=GET_APIC_ID(apic_read(APIC_ID));
464 unsigned long l;
465
466
467
468
469 SMP_PRINTK(("CALLIN %d\n",smp_processor_id()));
470 l=apic_read(APIC_SPIV);
471 l|=(1<<8);
472 apic_write(APIC_SPIV,l);
473 sti();
474
475
476
477 calibrate_delay();
478
479
480
481 smp_store_cpu_info(cpuid);
482
483
484
485 set_bit(cpuid, (unsigned long *)&cpu_callin_map[0]);
486
487
488
489 load_ldt(0);
490
491
492 local_invalidate();
493 while(!smp_commenced);
494 if (cpu_number_map[cpuid] == -1)
495 while(1);
496 local_invalidate();
497 SMP_PRINTK(("Commenced..\n"));
498
499 load_TR(cpu_number_map[cpuid]);
500
501 }
502
503
504
505
506
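/*
 * Boot-processor side of SMP bringup: map the local APIC, then for each
 * other CPU in cpu_present_map install a trampoline, point the warm-reset
 * vector at it, send INIT (and, for integrated APICs, STARTUP) IPIs and
 * wait for the target to appear in cpu_callin_map.
 */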
507 void smp_boot_cpus(void)
508 {
509 int i,j;
510 int cpucount=0;
511 unsigned long cfg;
512 void *stack;
513 extern unsigned long init_user_stack[];
514
515
516
517
518
519 for (i = 0; i < NR_CPUS; i++)
520 cpu_number_map[i] = -1;
521
522
523
524
525
526 kernel_stacks[boot_cpu_id]=(void *)init_user_stack;
527
528 smp_store_cpu_info(boot_cpu_id);
529
530 cpu_present_map |= (1 << smp_processor_id());
531 cpu_number_map[boot_cpu_id] = 0;
532 active_kernel_processor=boot_cpu_id;
533
534
535
536
537
538
539 if (!smp_found_config)
540 return;
541
542
543
544
545
546 apic_reg = vremap(0xFEE00000,4096);
547
548 if(apic_reg == NULL)
549 panic("Unable to map local apic.\n");
550
551 #ifdef SMP_DEBUG
552 {
553 int reg;
554
555
556
557
558
559
560
561
562 reg = apic_read(APIC_VERSION);
563 SMP_PRINTK(("Getting VERSION: %x\n", reg));
564
565 apic_write(APIC_VERSION, 0);
566 reg = apic_read(APIC_VERSION);
567 SMP_PRINTK(("Getting VERSION: %x\n", reg));
568
569
570
571
572
573
574
575
576
577
578
579
580
581 reg = apic_read(APIC_LVT0);
582 SMP_PRINTK(("Getting LVT0: %x\n", reg));
583
584 reg = apic_read(APIC_LVT1);
585 SMP_PRINTK(("Getting LVT1: %x\n", reg));
586 }
587 #endif
588
589
590
591
592
593 cfg=apic_read(APIC_SPIV);
594 cfg|=(1<<8);
595 apic_write(APIC_SPIV,cfg);
596
597 udelay(10);
598
599
600
601
602
603 SMP_PRINTK(("CPU map: %lx\n", cpu_present_map));
604
605 for(i=0;i<NR_CPUS;i++)
606 {
607
608
609
610 if (i == boot_cpu_id)
611 continue;
612
613 if (cpu_present_map & (1 << i))
614 {
615 unsigned long send_status, accept_status;
616 int timeout, num_starts;
617
618
619
620
621
622 stack=get_kernel_stack();
623 if(stack==NULL)
624 panic("No memory for processor stacks.\n");
625 kernel_stacks[i]=stack;
626 install_trampoline(stack);
627
628 printk("Booting processor %d stack %p: ",i,stack);
629
630
631
632
633
634
635 SMP_PRINTK(("Setting warm reset code and vector.\n"));
636
637
638
639
640
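/*
 * CMOS register 0x0F = 0x0A requests a warm reset through the real-mode
 * vector at 0x467/0x469, which is pointed at the trampoline page.  pg0[0]
 * is temporarily made present and writable so physical page zero can be
 * written.
 */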
641 CMOS_WRITE(0xa, 0xf);
642 pg0[0]=7;
643 local_invalidate();
644 *((volatile unsigned short *) 0x467) = ((unsigned long)stack)>>4;
645 *((volatile unsigned short *) 0x469) = 0;
646
647
648
649
650
651 pg0[0]= pte_val(mk_pte(0, PAGE_READONLY));
652 local_invalidate();
653
654
655
656
657
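/* Clear any error state latched in the APIC error status register before sending IPIs. */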
658 apic_write(APIC_ESR, 0);
659 accept_status = (apic_read(APIC_ESR) & 0xEF);
660
661
662
663
664
665 send_status = 0;
666 accept_status = 0;
667
668
669
670
671
672 SMP_PRINTK(("Asserting INIT.\n"));
673
674
675
676
677
678 cfg=apic_read(APIC_ICR2);
679 cfg&=0x00FFFFFF;
680 apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));
681 cfg=apic_read(APIC_ICR);
682 cfg&=~0xCDFFF;
683 cfg |= (APIC_DEST_FIELD | APIC_DEST_LEVELTRIG
684 | APIC_DEST_ASSERT | APIC_DEST_DM_INIT);
685 apic_write(APIC_ICR, cfg);
686
687 udelay(200);
688 SMP_PRINTK(("Deasserting INIT.\n"));
689
690 cfg=apic_read(APIC_ICR2);
691 cfg&=0x00FFFFFF;
692 apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));
693 cfg=apic_read(APIC_ICR);
694 cfg&=~0xCDFFF;
695 cfg |= (APIC_DEST_FIELD | APIC_DEST_LEVELTRIG
696 | APIC_DEST_DM_INIT);
697 apic_write(APIC_ICR, cfg);
698
699
700
701
702
703
704
705
706
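/*
 * Integrated local APICs (version 0x1X) need STARTUP IPIs carrying the
 * page number of the trampoline; older external 82489DX APICs (version
 * 0x0X) start running on the INIT sequence alone.
 */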
707 if ( apic_version[i] & 0xF0 )
708 num_starts = 2;
709 else
710 num_starts = 0;
711
712
713
714
715
716 for (j = 0; !(send_status || accept_status)
717 && (j < num_starts) ; j++)
718 {
719 SMP_PRINTK(("Sending STARTUP #%d.\n",j));
720
721 apic_write(APIC_ESR, 0);
722
723
724
725
726
727 cfg=apic_read(APIC_ICR2);
728 cfg&=0x00FFFFFF;
729 apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));
730 cfg=apic_read(APIC_ICR);
731 cfg&=~0xCDFFF;
732 cfg |= (APIC_DEST_FIELD
733 | APIC_DEST_DM_STARTUP
734 | (((int) stack) >> 12) );
735 apic_write(APIC_ICR, cfg);
736
737 timeout = 0;
738 do {
739 udelay(10);
740 } while ( (send_status = (apic_read(APIC_ICR) & 0x1000))
741 && (timeout++ < 1000));
742 udelay(200);
743
744 accept_status = (apic_read(APIC_ESR) & 0xEF);
745 }
746
747 if (send_status)
748 printk("APIC never delivered???\n");
749 if (accept_status)
750 printk("APIC delivery error (%lx).\n", accept_status);
751
752 if( !(send_status || accept_status) )
753 {
754 for(timeout=0;timeout<50000;timeout++)
755 {
756 if(cpu_callin_map[0]&(1<<i))
757 break;
758 udelay(100);
759 }
760 if(cpu_callin_map[0]&(1<<i))
761 {
762 cpucount++;
763
764 cpu_number_map[i] = cpucount;
765 }
766 else
767 {
768 if(*((volatile unsigned char *)8192)==0xA5)
769 printk("Stuck ??\n");
770 else
771 printk("Not responding.\n");
772 }
773 }
774
775
776 *((volatile unsigned long *)8192) = 0;
777 }
778
779
780
781
782
783 if (cpu_number_map[i] == -1)
784 cpu_present_map &= ~(1 << i);
785 }
786
787
788
789
790
791
792
793
794
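/*
 * Bringup is finished: restore page zero's mapping long enough to clear
 * the CMOS shutdown code and the warm-reset vector, then put the original
 * mapping back.
 */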
795 cfg = pg0[0];
796 pg0[0] = 3;
797 local_invalidate();
798
799
800
801
802
803
804 CMOS_WRITE(0, 0xf);
805
806 *((volatile long *) 0x467) = 0;
807
808
809
810
811
812 pg0[0] = cfg;
813 local_invalidate();
814
815
816
817
818
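/* Report how many processors actually came up and their combined BogoMIPS. */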
819 if(cpucount==0)
820 {
821 printk("Error: only one processor found.\n");
822 cpu_present_map=(1<<smp_processor_id());
823 }
824 else
825 {
826 unsigned long bogosum=0;
827 for(i=0;i<32;i++)
828 {
829 if(cpu_present_map&(1<<i))
830 bogosum+=cpu_data[i].udelay_val;
831 }
832 printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
833 cpucount+1,
834 (bogosum+2500)/500000,
835 ((bogosum+2500)/5000)%100);
836 smp_activated=1;
837 smp_num_cpus=cpucount+1;
838 }
839 }
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
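/*
 * Send a cross-processor message as an APIC IPI.  'target' is a CPU
 * number or MSG_ALL/MSG_ALL_BUT_SELF, 'msg' selects the interrupt vector
 * used, and 'wait' chooses whether to spin until the targets have checked
 * in (1) or until the invalidate mask clears (2).
 */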
855 void smp_message_pass(int target, int msg, unsigned long data, int wait)
856 {
857 unsigned long cfg;
858 unsigned long target_map;
859 int p=smp_processor_id();
860 int irq=0x2d;
861 int ct=0;
862 static volatile int message_cpu = NO_PROC_ID;
863
864
865
866
867
868 if(!smp_activated || !smp_commenced)
869 return;
870
871
872
873
874
875
876
877
878 if(msg==MSG_RESCHEDULE)
879 {
880 irq=0x30;
881 if(smp_cpu_in_msg[p])
882 return;
883 }
884
885
886
887
888
889
890
891 if(message_cpu!=NO_PROC_ID && msg!=MSG_STOP_CPU)
892 {
893 panic("CPU #%d: Message pass %d but pass in progress by %d of %d\n",
894 smp_processor_id(),msg,message_cpu, smp_msg_id);
895 }
896 message_cpu=smp_processor_id();
897
898
899
900
901
902
903 smp_cpu_in_msg[p]++;
904
905
906
907
908
909 if(msg!=MSG_RESCHEDULE)
910 {
911 smp_src_cpu=p;
912 smp_msg_id=msg;
913 smp_msg_data=data;
914 }
915
916
917
918
919
920
921
922
923
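/*
 * Wait for the delivery-status (busy) bit in the ICR to clear so that any
 * previous IPI has gone out; give up after roughly 10ms.
 */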
924 while(ct<1000)
925 {
926 cfg=apic_read(APIC_ICR);
927 if(!(cfg&(1<<12)))
928 break;
929 ct++;
930 udelay(10);
931 }
932
933
934
935
936
937 if(ct==1000)
938 printk("CPU #%d: previous IPI still not cleared after 10mS", smp_processor_id());
939
940
941
942
943
944 cfg=apic_read(APIC_ICR2);
945 cfg&=0x00FFFFFF;
946 apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(target));
947 cfg=apic_read(APIC_ICR);
948 cfg&=~0xFDFFF;
949 cfg|=APIC_DEST_FIELD|APIC_DEST_DM_FIXED|irq;
950
951
952
953
954
955 if(target==MSG_ALL_BUT_SELF)
956 {
957 cfg|=APIC_DEST_ALLBUT;
958 target_map=cpu_present_map;
959 cpu_callin_map[0]=(1<<smp_src_cpu);
960 }
961 else if(target==MSG_ALL)
962 {
963 cfg|=APIC_DEST_ALLINC;
964 target_map=cpu_present_map;
965 cpu_callin_map[0]=0;
966 }
967 else
968 {
969 target_map=(1<<target);
970 cpu_callin_map[0]=0;
971 }
972
973
974
975
976
977 apic_write(APIC_ICR, cfg);
978
979
980
981
982
983 switch(wait)
984 {
985 case 1:
986 while(cpu_callin_map[0]!=target_map);
987 break;
988 case 2:
989 while(smp_invalidate_needed);
990 break;
991 }
992
993
994
995
996
997 smp_cpu_in_msg[p]--;
998 message_cpu=NO_PROC_ID;
999 }
1000
1001
1002
1003
1004
1005
1006
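/*
 * Flush the TLB on all processors: mark every other CPU in
 * smp_invalidate_needed, send MSG_INVALIDATE_TLB to them, and flush the
 * local TLB.  Only the processor holding the kernel lock may call this.
 */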
1007 void smp_invalidate(void)
1008 {
1009 unsigned long flags;
1010 if(smp_activated && smp_processor_id()!=active_kernel_processor)
1011 panic("CPU #%d:Attempted invalidate IPI when not AKP(=%d)\n",smp_processor_id(),active_kernel_processor);
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021 smp_invalidate_needed=cpu_present_map&~(1<<smp_processor_id());
1022
1023
1024
1025
1026
1027
1028 save_flags(flags);
1029 cli();
1030 smp_message_pass(MSG_ALL_BUT_SELF, MSG_INVALIDATE_TLB, 0L, 2);
1031
1032
1033
1034
1035
1036 local_invalidate();
1037
1038 restore_flags(flags);
1039
1040
1041
1042
1043
1044
1045 }
1046
1047
1048
1049
1050
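/*
 * Handler for the reschedule IPI: performs the per-tick accounting the
 * boot CPU does from its timer interrupt (user/system time, interval
 * timers, RLIMIT_CPU, profiling), flags a reschedule when the quantum
 * expires and acknowledges the local APIC.
 */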
1051 void smp_reschedule_irq(int cpl, void *dev_id, struct pt_regs *regs)
1052 {
1053 #ifdef DEBUGGING_SMP_RESCHED
1054 static int ct=0;
1055 if(ct==0)
1056 {
1057 printk("Beginning scheduling on CPU#%d\n",smp_processor_id());
1058 ct=1;
1059 }
1060 #endif
1061 if(smp_processor_id()!=active_kernel_processor)
1062 panic("SMP Reschedule on CPU #%d, but #%d is active.\n",
1063 smp_processor_id(), active_kernel_processor);
1064
1065
1066
1067
1068 if (user_mode(regs))
1069 {
1070 current->utime++;
1071 if (current->pid)
1072 {
1073 if (current->priority < 15)
1074 kstat.cpu_nice++;
1075 else
1076 kstat.cpu_user++;
1077 }
1078
1079 if (current->it_virt_value && !(--current->it_virt_value)) {
1080 current->it_virt_value = current->it_virt_incr;
1081 send_sig(SIGVTALRM,current,1);
1082 }
1083 } else {
1084 current->stime++;
1085 if(current->pid)
1086 kstat.cpu_system++;
1087 #ifdef CONFIG_PROFILE
1088 if (prof_buffer && current->pid) {
1089 extern int _stext;
1090 unsigned long eip = regs->eip - (unsigned long) &_stext;
1091 eip >>= CONFIG_PROFILE_SHIFT;
1092 if (eip < prof_len)
1093 prof_buffer[eip]++;
1094 }
1095 #endif
1096 }
1097
1098
1099
1100 if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
1101 (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
1102 send_sig(SIGKILL, current, 1);
1103 if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
1104 (((current->stime + current->utime) % HZ) == 0)) {
1105 unsigned long psecs = (current->stime + current->utime) / HZ;
1106
1107 if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
1108 send_sig(SIGXCPU, current, 1);
1109
1110 else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
1111 ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
1112 send_sig(SIGXCPU, current, 1);
1113 }
1114
1115
1116 if (current->it_prof_value && !(--current->it_prof_value)) {
1117 current->it_prof_value = current->it_prof_incr;
1118 send_sig(SIGPROF,current,1);
1119 }
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134 if ( 0 > --current->counter || current->pid == 0)
1135 {
1136 current->counter = 0;
1137 need_resched=1;
1138 }
1139
1140
1141
1142
1143
1144 apic_read(APIC_SPIV);
1145 apic_write(APIC_EOI, 0);
1146 }
1147
1148
1149
1150
1151
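/*
 * Handler for the cross-CPU message vector: acts on smp_msg_id (TLB
 * invalidate or CPU stop), records completion in cpu_callin_map and
 * acknowledges the local APIC.
 */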
1152 void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs)
1153 {
1154 int i=smp_processor_id();
1155
1156
1157
1158 switch(smp_msg_id)
1159 {
1160 case 0:
1161 return;
1162
1163
1164
1165
1166
1167 case MSG_INVALIDATE_TLB:
1168 if(clear_bit(i,(unsigned long *)&smp_invalidate_needed))
1169 local_invalidate();
1170 set_bit(i, (unsigned long *)&cpu_callin_map[0]);
1171 cpu_callin_map[0]|=1<<smp_processor_id();
1172 break;
1173
1174
1175
1176
1177 case MSG_STOP_CPU:
1178 while(1)
1179 {
1180 if(cpu_data[smp_processor_id()].hlt_works_ok)
1181 __asm__("hlt");
1182 }
1183 default:
1184 printk("CPU #%d sent invalid cross CPU message to CPU #%d: %X(%lX).\n",
1185 smp_src_cpu,smp_processor_id(),smp_msg_id,smp_msg_data);
1186 break;
1187 }
1188
1189
1190
1191
1192 apic_read(APIC_SPIV);
1193 apic_write(APIC_EOI, 0);
1194 }