This source file includes following definitions.
- mpf_checksum
- mpc_family
- smp_read_mpc
- smp_scan_config
- install_trampoline
- smp_alloc_memory
- get_kernel_stack
- smp_store_cpu_info
- smp_commence
- smp_callin
- smp_boot_cpus
- smp_message_pass
- smp_invalidate
- smp_reschedule_irq
- smp_message_irq
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22 #include <linux/kernel.h>
23 #include <linux/string.h>
24 #include <linux/config.h>
25 #include <linux/timer.h>
26 #include <linux/sched.h>
27 #include <linux/mm.h>
28 #include <linux/kernel_stat.h>
29 #include <linux/delay.h>
30 #include <asm/i82489.h>
31 #include <linux/smp.h>
32 #include <asm/pgtable.h>
33 #include <asm/bitops.h>
34 #include <asm/pgtable.h>
35 #include <asm/smp.h>
36
/* True once a valid MP floating pointer structure has been found. */
static int smp_found_config=0;

unsigned long cpu_present_map = 0;		/* Bit per usable CPU, indexed by APIC id */
int smp_num_cpus;				/* Number of CPUs actually activated */
int smp_threads_ready=0;			/* Set once per-CPU threads may run */
volatile unsigned long cpu_number_map[NR_CPUS];	/* APIC id -> logical CPU number; -1 if unused */
volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};	/* Element 0: bitmask of CPUs that answered */
volatile unsigned long smp_invalidate_needed;	/* Bitmask of CPUs that still owe a TLB flush */
struct cpuinfo_x86 cpu_data[NR_CPUS];		/* Per-CPU identification/speed data */
static unsigned int num_processors = 1;		/* Processors counted from the MP tables */
static unsigned long io_apic_addr = 0;		/* Physical address of the (single) I/O APIC */
unsigned char boot_cpu_id = 0;			/* APIC id of the processor that booted us */
static unsigned char *kstack_base,*kstack_end;	/* Low-memory pool carved into AP stack pages */
static int smp_activated = 0;			/* Set once extra processors are running */
static volatile int smp_commenced=0;		/* Released by smp_commence() to free the APs */
unsigned long apic_addr=0xFEE00000;		/* Physical address of the local APIC */
unsigned long nlong = 0;			/* Scratch word; smp_read_mpc stores boot_cpu_id<<24 here */
/* Until vremap() maps the real APIC, apic_reg points 0x20 before nlong so
   that reading the id register yields nlong -- NOTE(review): presumably the
   APIC_ID offset is 0x20; confirm against asm/i82489.h. */
unsigned char *apic_reg=((unsigned char *)(&nlong))-0x20;
unsigned long apic_retval;			/* Debug: last APIC access result (not set here) */
unsigned char *kernel_stacks[NR_CPUS];		/* Per-CPU kernel stack pages */

static volatile unsigned char smp_cpu_in_msg[NR_CPUS];	/* Nonzero while that CPU is passing a message */
static volatile unsigned long smp_msg_data;	/* Payload of the in-flight cross-CPU message */
static volatile int smp_src_cpu;		/* Sender of the in-flight message */
static volatile int smp_msg_id;			/* Id of the in-flight message */

volatile unsigned long kernel_flag=0;		/* Kernel entry lock word -- used by lock/unlock asm elsewhere */
volatile unsigned char active_kernel_processor = NO_PROC_ID;	/* CPU currently inside the kernel proper */
volatile unsigned long kernel_counter=0;	/* NOTE(review): presumably kernel-lock recursion depth; set elsewhere */
volatile unsigned long syscall_count=0;		/* NOTE(review): presumably syscall-path counter; set elsewhere */

volatile unsigned long ipi_count;		/* Statistics: number of IPIs taken */
#ifdef __SMP_PROF__
volatile unsigned long smp_spins[NR_CPUS]={0};			/* Lock-spin profiling counters */
volatile unsigned long smp_spins_syscall[NR_CPUS]={0};		/* Spins on syscall entry */
volatile unsigned long smp_spins_syscall_cur[NR_CPUS]={0};	/* Spins in current syscall */
volatile unsigned long smp_spins_sys_idle[NR_CPUS]={0};		/* Spins while idle */
volatile unsigned long smp_idle_count[1+NR_CPUS]={0,};		/* Idle tick counters */
#endif
#if defined (__SMP_PROF__)
volatile unsigned long smp_idle_map=0;		/* Bitmask of currently-idle CPUs */
#endif



/* SMP_PRINTK((fmt, args...)) compiles to nothing unless SMP_DEBUG is set. */
#ifdef SMP_DEBUG
#define SMP_PRINTK(x) printk x
#else
#define SMP_PRINTK(x)
#endif
87
88
89
90
91
92
/*
 * Sum 'len' bytes starting at 'mp' and return the low eight bits.
 * A well-formed MP floating structure or configuration table is laid
 * out so that all its bytes sum to zero modulo 256.
 */
static int mpf_checksum(unsigned char *mp, int len)
{
	int total = 0;
	int i;

	for (i = 0; i < len; i++)
		total += mp[i];
	return total & 0xFF;
}
100
101
102
103
104
/*
 * Map an MP-table CPU family/model pair to a printable name.
 * Unknown combinations are formatted into a static buffer, so that
 * fallback result is overwritten by the next unknown lookup.
 */
static char *mpc_family(int family,int model)
{
	static char unknown_buf[32];
	static char *i486_names[]=
	{
		"80486DX","80486DX",
		"80486SX","80486DX/2 or 80487",
		"80486SL","Intel5X2(tm)",
		"Unknown","Unknown",
		"80486DX/4"
	};

	switch (family) {
	case 0x5:
		return "Pentium(tm)";
	case 0x0F:
		if (model == 0x0F)
			return "Special controller";
		break;
	case 0x04:
		if (model < 9)
			return i486_names[model];
		break;
	}
	sprintf(unknown_buf, "Unknown CPU [%d:%d]", family, model);
	return unknown_buf;
}
125
126
127
128
129
130 static int smp_read_mpc(struct mp_config_table *mpc)
131 {
132 char str[16];
133 int count=sizeof(*mpc);
134 int apics=0;
135 unsigned char *mpt=((unsigned char *)mpc)+count;
136
137 if(memcmp(mpc->mpc_signature,MPC_SIGNATURE,4))
138 {
139 printk("Bad signature [%c%c%c%c].\n",
140 mpc->mpc_signature[0],
141 mpc->mpc_signature[1],
142 mpc->mpc_signature[2],
143 mpc->mpc_signature[3]);
144 return 1;
145 }
146 if(mpf_checksum((unsigned char *)mpc,mpc->mpc_length))
147 {
148 printk("Checksum error.\n");
149 return 1;
150 }
151 if(mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04)
152 {
153 printk("Bad Config Table version (%d)!!\n",mpc->mpc_spec);
154 return 1;
155 }
156 memcpy(str,mpc->mpc_oem,8);
157 str[8]=0;
158 printk("OEM ID: %s ",str);
159 memcpy(str,mpc->mpc_productid,12);
160 str[12]=0;
161 printk("Product ID: %s ",str);
162 printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
163
164
165 apic_addr = mpc->mpc_lapic;
166
167
168
169
170
171 while(count<mpc->mpc_length)
172 {
173 switch(*mpt)
174 {
175 case MP_PROCESSOR:
176 {
177 struct mpc_config_processor *m=
178 (struct mpc_config_processor *)mpt;
179 if(m->mpc_cpuflag&CPU_ENABLED)
180 {
181 printk("Processor #%d %s APIC version %d\n",
182 m->mpc_apicid,
183 mpc_family((m->mpc_cpufeature&
184 CPU_FAMILY_MASK)>>8,
185 (m->mpc_cpufeature&
186 CPU_MODEL_MASK)>>4),
187 m->mpc_apicver);
188 #ifdef SMP_DEBUG
189 if(m->mpc_featureflag&(1<<0))
190 printk(" Floating point unit present.\n");
191 if(m->mpc_featureflag&(1<<7))
192 printk(" Machine Exception supported.\n");
193 if(m->mpc_featureflag&(1<<8))
194 printk(" 64 bit compare & exchange supported.\n");
195 if(m->mpc_featureflag&(1<<9))
196 printk(" Internal APIC present.\n");
197 #endif
198 if(m->mpc_cpuflag&CPU_BOOTPROCESSOR)
199 {
200 SMP_PRINTK((" Bootup CPU\n"));
201 boot_cpu_id=m->mpc_apicid;
202 nlong = boot_cpu_id<<24;
203 }
204 else
205 num_processors++;
206
207 if(m->mpc_apicid>NR_CPUS)
208 printk("Processor #%d unused. (Max %d processors).\n",m->mpc_apicid, NR_CPUS);
209 else
210 cpu_present_map|=(1<<m->mpc_apicid);
211 }
212 mpt+=sizeof(*m);
213 count+=sizeof(*m);
214 break;
215 }
216 case MP_BUS:
217 {
218 struct mpc_config_bus *m=
219 (struct mpc_config_bus *)mpt;
220 memcpy(str,m->mpc_bustype,6);
221 str[6]=0;
222 SMP_PRINTK(("Bus #%d is %s\n",
223 m->mpc_busid,
224 str));
225 mpt+=sizeof(*m);
226 count+=sizeof(*m);
227 break;
228 }
229 case MP_IOAPIC:
230 {
231 struct mpc_config_ioapic *m=
232 (struct mpc_config_ioapic *)mpt;
233 if(m->mpc_flags&MPC_APIC_USABLE)
234 {
235 apics++;
236 printk("I/O APIC #%d Version %d at 0x%lX.\n",
237 m->mpc_apicid,m->mpc_apicver,
238 m->mpc_apicaddr);
239 io_apic_addr = m->mpc_apicaddr;
240 }
241 mpt+=sizeof(*m);
242 count+=sizeof(*m);
243 break;
244 }
245 case MP_INTSRC:
246 {
247 struct mpc_config_intsrc *m=
248 (struct mpc_config_intsrc *)mpt;
249
250 mpt+=sizeof(*m);
251 count+=sizeof(*m);
252 break;
253 }
254 case MP_LINTSRC:
255 {
256 struct mpc_config_intlocal *m=
257 (struct mpc_config_intlocal *)mpt;
258 mpt+=sizeof(*m);
259 count+=sizeof(*m);
260 break;
261 }
262 }
263 }
264 if(apics>1)
265 printk("Warning: Multiple APIC's not supported.\n");
266 return num_processors;
267 }
268
269
270
271
272
/*
 * Scan 'length' bytes of physical memory starting at 'base' for the
 * Intel MP floating pointer structure ("_MP_" signature).  If a valid
 * one is found, either read the attached configuration table or apply
 * the MP-spec default (two CPU) configuration it names.
 */
void smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned long *bp=(unsigned long *)base;
	struct intel_mp_floating *mpf;

	SMP_PRINTK(("Scan SMP from %p for %ld bytes.\n",
		bp,length));
	/* Paranoia: the structure is defined to be exactly 16 bytes. */
	if(sizeof(*mpf)!=16)
		printk("Error: MPF size\n");

	while(length>0)
	{
		if(*bp==SMP_MAGIC_IDENT)
		{
			mpf=(struct intel_mp_floating *)bp;
			/* mpf_length is in 16-byte paragraphs; the whole
			   structure must checksum to zero and claim spec
			   revision 1.1 or 1.4. */
			if(mpf->mpf_length==1 &&
				!mpf_checksum((unsigned char *)bp,16) &&
				(mpf->mpf_specification == 1
				 || mpf->mpf_specification == 4) )
			{
				printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
				if(mpf->mpf_feature2&(1<<7))
					printk(" IMCR and PIC compatibility mode.\n");
				else
					printk(" Virtual Wire compatibility mode.\n");
				smp_found_config=1;
				/* A nonzero feature1 byte selects one of the
				   MP-spec default configurations: always two
				   CPUs, I/O APIC at the standard address. */
				if(mpf->mpf_feature1!=0)
				{
					num_processors=2;
					printk("I/O APIC at 0xFEC00000.\n");
					printk("Bus#0 is ");
				}
				switch(mpf->mpf_feature1)
				{
					case 1:
						printk("ISA");
						break;
					case 2:
						printk("EISA with no IRQ8 chaining");
						break;
					case 3:
						printk("EISA");
						break;
					case 4:
						printk("MCA");
						break;
					case 5:
						printk("ISA\nBus#1 is PCI");
						break;
					case 6:
						printk("EISA\nBus #1 is PCI");
						break;
					case 7:
						printk("MCA\nBus #1 is PCI");
						break;
					case 0:
						/* Full table present; nothing to print here. */
						break;
					default:
						printk("???\nUnknown standard configuration %d\n",
							mpf->mpf_feature1);
						return;
				}
				/* Prefer the full configuration table when one
				   is supplied; otherwise a default config means
				   exactly CPUs 0 and 1 are present. */
				if(mpf->mpf_physptr)
					smp_read_mpc((void *)mpf->mpf_physptr);
				else
					cpu_present_map=3;
				printk("Processors: %d\n", num_processors);
			}
		}
		/* The structure is 16-byte aligned: advance one paragraph.
		   NOTE(review): bp+=4 assumes 4-byte longs -- fine on i386. */
		bp+=4;
		length-=16;
	}
}
353
354
355
356
357
/*
 * Raw machine code for the secondary-CPU startup trampoline, assembled
 * externally into trampoline.hex.  smp_boot_cpus() copies it onto each
 * AP's stack page and points the STARTUP IPI at that page.
 */
static unsigned char trampoline_data[]={
#include "trampoline.hex"
};
361
362
363
364
365
366
367
368 static void install_trampoline(unsigned char *mp)
369 {
370 memcpy(mp,trampoline_data,sizeof(trampoline_data));
371 }
372
373
374
375
376
377
378
379
380 unsigned long smp_alloc_memory(unsigned long mem_base)
381 {
382 int size=(num_processors-1)*PAGE_SIZE;
383
384
385
386
387
388 if(mem_base+size>=0x9F000)
389 panic("smp_alloc_memory: Insufficient low memory for kernel stacks.\n");
390 kstack_base=(void *)mem_base;
391 mem_base+=size;
392 kstack_end=(void *)mem_base;
393 return mem_base;
394 }
395
396
397
398
399
400 static void *get_kernel_stack(void)
401 {
402 void *stack=kstack_base;
403 if(kstack_base>=kstack_end)
404 return NULL;
405 kstack_base+=PAGE_SIZE;
406 return stack;
407 }
408
409
410
411
412
413
414
/*
 * Snapshot the boot-time CPU identification globals into cpu_data[id].
 * Intended to run on the CPU being described after calibrate_delay()
 * (smp_callin() does exactly that), so udelay_val is per-CPU correct.
 */
void smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c=&cpu_data[id];

	c->hard_math=hard_math;			/* FPU present */
	c->x86=x86;				/* CPU family */
	c->x86_model=x86_model;
	c->x86_mask=x86_mask;			/* NOTE(review): presumably stepping -- confirm */
	c->x86_capability=x86_capability;	/* CPUID feature flags */
	c->fdiv_bug=fdiv_bug;
	c->wp_works_ok=wp_works_ok;
	c->hlt_works_ok=hlt_works_ok;
	c->udelay_val=loops_per_sec;		/* From calibrate_delay() */
	strcpy(c->x86_vendor_id, x86_vendor_id);
}
429
430
431
432
433
434
435
436
437
438
439
/*
 * Called by the boot processor once everything is set up: releases the
 * secondary CPUs spinning on smp_commenced in smp_callin().
 */
void smp_commence(void)
{
	/* Lets the callin loops below proceed. */
	smp_commenced=1;
}
447
/*
 * C entry point for a freshly started secondary (application)
 * processor, reached from the trampoline.  Enables its local APIC,
 * calibrates udelay, records its cpu_data, signals arrival to the boot
 * CPU via cpu_callin_map, then spins until smp_commence() releases it.
 */
void smp_callin(void)
{
	int cpuid=GET_APIC_ID(apic_read(APIC_ID));
	unsigned long l;

	SMP_PRINTK(("CALLIN %d\n",smp_processor_id()));
	/* Enable our local APIC: bit 8 of the spurious vector register. */
	l=apic_read(APIC_SPIV);
	l|=(1<<8);
	apic_write(APIC_SPIV,l);
	sti();

	/* Work out this CPU's BogoMIPS so udelay() works here. */
	calibrate_delay();

	/* Record our identification data (uses loops_per_sec just set). */
	smp_store_cpu_info(cpuid);

	/* Tell the boot processor we have arrived. */
	set_bit(cpuid, &cpu_callin_map[0]);

	/* NOTE(review): presumably loads the null LDT -- confirm. */
	load_ldt(0);

	local_invalidate();
	/* Spin until the boot CPU runs smp_commence(). */
	while(!smp_commenced);
	/* If the boot CPU gave up on us and marked our slot -1, park. */
	if (cpu_number_map[cpuid] == -1)
		while(1);
	local_invalidate();
	SMP_PRINTK(("Commenced..\n"));

	/* Load our task register from our logical CPU number. */
	load_TR(cpu_number_map[cpuid]);

}
489
490
491
492
493
/*
 * Bring up the secondary processors.  For each present CPU (other than
 * the boot CPU): allocate a stack page, copy the trampoline onto it,
 * then run the INIT / de-assert-INIT / STARTUP / STARTUP IPI sequence
 * through the local APIC and wait for the CPU to call in.  Finally sum
 * and report the combined BogoMIPS.
 */
void smp_boot_cpus(void)
{
	int i=0;
	int cpucount=0;
	void *stack;
	extern unsigned long init_user_stack[];

	/* Map the real local APIC page (replacing the fake nlong-backed
	   mapping) if more than one CPU is present. */
	if (1<cpu_present_map)
		apic_reg = vremap(0xFEE00000,4096);

	if(apic_reg == NULL)
		panic("Unable to map local apic.\n");

#ifdef SMP_DEBUG
	{
		int reg;

		/* Poke at a few APIC registers to prove the mapping works. */
		reg = apic_read(APIC_VERSION);
		printk("Getting VERSION: %x\n", reg);

		apic_write(APIC_VERSION, 0);
		reg = apic_read(APIC_VERSION);
		printk("Getting VERSION: %x\n", reg);

		reg = apic_read(APIC_LVT0);
		printk("Getting LVT0: %x\n", reg);

		reg = apic_read(APIC_LVT1);
		printk("Getting LVT1: %x\n", reg);
	}
#endif

	/* The boot CPU keeps the stack it has been using all along. */
	kernel_stacks[boot_cpu_id]=(void *)init_user_stack;

	smp_store_cpu_info(boot_cpu_id);

	active_kernel_processor=boot_cpu_id;

	SMP_PRINTK(("CPU map: %lx\n", cpu_present_map));

	for(i=0;i<NR_CPUS;i++)
	{
		if((cpu_present_map&(1<<i)) && i!=boot_cpu_id)
		{
			unsigned long cfg, send_status, accept_status;
			int timeout;

			/* Give the AP a stack page; the trampoline code is
			   copied onto the same page. */
			stack=get_kernel_stack();
			if(stack==NULL)
				panic("No memory for processor stacks.\n");
			kernel_stacks[i]=stack;
			install_trampoline(stack);

			printk("Booting processor %d stack %p: ",i,stack);

			/* Make sure our own APIC is enabled before sending. */
			cfg=apic_read(APIC_SPIV);
			cfg|=(1<<8);
			apic_write(APIC_SPIV,cfg);

#ifdef EEK
			SMP_PRINTK(("Setting warm reset code and vector.\n"));

			/* BIOS warm-reset path: shutdown code in CMOS 0xf,
			   reset vector at 40:67. */
			CMOS_WRITE(0xa, 0xf);
			*((volatile unsigned short *) 0x467) = (unsigned short)(stack>>4);
			*((volatile unsigned short *) 0x469) = 0;
#endif

			/* Clear stale APIC errors, then reset our bookkeeping:
			   send_status = delivery seen, accept_status = APIC
			   error bits after delivery. */
			apic_write(APIC_ESR, 0);
			accept_status = (apic_read(APIC_ESR) & 0xEF);
			send_status = 0;
			accept_status = 0;

			SMP_PRINTK(("Asserting INIT.\n"));

			/* Target the AP and send an assert-INIT IPI
			   (0xc500: INIT delivery mode, level, assert). */
			cfg=apic_read(APIC_ICR2);
			cfg&=0x00FFFFFF;
			apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));
			cfg=apic_read(APIC_ICR);
			cfg&=~0xCDFFF;
			cfg|=0x0000c500;
			apic_write(APIC_ICR, cfg);

			/* Wait up to ~1s for the delivery-pending bit (12)
			   to clear. */
			timeout = 0;
			do {
				udelay(1000);
				if ((send_status = (!(apic_read(APIC_ICR) & 0x00001000))))
					break;
			} while (timeout++ < 1000);

#ifdef EEK2
			if (send_status) {
				apic_write(APIC_ESR, 0);
				accept_status = (apic_read(APIC_ESR) & 0xEF);
			}
#endif

			if (send_status && !accept_status)
			{
				SMP_PRINTK(("Deasserting INIT.\n"));

				/* De-assert INIT (0x8500: level, deassert). */
				cfg=apic_read(APIC_ICR2);
				cfg&=0x00FFFFFF;
				apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));
				cfg=apic_read(APIC_ICR);
				cfg&=~0xCDFFF;
				cfg|=0x00008500;
				apic_write(APIC_ICR, cfg);

				timeout = 0;
				do {
					udelay(1000);
					if ((send_status = !(apic_read(APIC_ICR) & 0x00001000) ))
						break;
				} while (timeout++ < 1000);

				/* On delivery, give the AP a second and then
				   collect any APIC error bits. */
				if (send_status) {
					udelay(1000000);
					apic_write(APIC_ESR, 0);
					accept_status = (apic_read(APIC_ESR) & 0xEF);
				}
			}

			if (send_status && !accept_status)
			{
				SMP_PRINTK(("Sending first STARTUP.\n"));

				/* STARTUP IPI: vector field carries the page
				   number of the trampoline (stack >> 12). */
				cfg=apic_read(APIC_ICR2);
				cfg&=0x00FFFFFF;
				apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));
				cfg=apic_read(APIC_ICR);
				cfg&=~0xCDFFF ;
				cfg|=APIC_DEST_FIELD|APIC_DEST_DM_STARTUP|(((unsigned long)stack)>>12);
				apic_write(APIC_ICR, cfg);

				timeout = 0;
				do {
					udelay(1000);
					if ((send_status = !(apic_read(APIC_ICR) & 0x00001000)) )
						break;
				} while (timeout++ < 1000);

				if (send_status) {
					udelay(1000000);
					apic_write(APIC_ESR, 0);
					accept_status = (apic_read(APIC_ESR) & 0xEF);
				}
			}

			if (send_status && !accept_status)
			{
				SMP_PRINTK(("Sending second STARTUP.\n"));

				/* Second STARTUP, as some APICs need two. */
				cfg=apic_read(APIC_ICR2);
				cfg&=0x00FFFFFF;
				apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));
				cfg=apic_read(APIC_ICR);
				cfg&=~0xCDFFF ;
				cfg|=APIC_DEST_FIELD|APIC_DEST_DM_STARTUP|(((unsigned long)stack)>>12);
				apic_write(APIC_ICR, cfg);

				timeout = 0;
				do {
					udelay(1000);
					if ((send_status = !(apic_read(APIC_ICR) & 0x00001000)))
						break;
				} while (timeout++ < 1000);

				if (send_status) {
					udelay(1000000);
					apic_write(APIC_ESR, 0);
					accept_status = (apic_read(APIC_ESR) & 0xEF);
				}
			}

			if (!send_status)
				printk("APIC never delivered???\n");
			else if (accept_status)
				printk("APIC delivery error (%lx).\n", accept_status);
			else
			{
				/* IPIs went out cleanly: give the AP up to
				   ~5s to set its bit in cpu_callin_map. */
				for(timeout=0;timeout<50000;timeout++)
				{
					if(cpu_callin_map[0]&(1<<i))
						break;
					udelay(100);
				}
				if(cpu_callin_map[0]&(1<<i))
				{
					cpucount++;
					/* Logical numbering: boot CPU is 0,
					   APs get 1..n in boot order. */
					cpu_number_map[i] = cpucount;
				}
				else
				{
					/* NOTE(review): 0xA5 at phys 8192 is
					   presumably a marker the trampoline
					   writes -- confirm against
					   trampoline source. */
					if(*((volatile unsigned char *)8192)==0xA5)
						printk("Stuck ??\n");
					else
						printk("Not responding val=(%lx).\n", *((unsigned long *) stack));
					cpu_present_map&=~(1<<i);
					cpu_number_map[i] = -1;
				}
			}

			/* Clear the trampoline progress marker. */
			*((volatile unsigned long *)8192) = 0;
		}
		else if (i == boot_cpu_id)
		{
			cpu_number_map[i] = 0;
		}
		else
		{
			cpu_number_map[i] = -1;
		}

	}

	/* Summarize the result. */
	if(cpucount==0)
	{
		printk("Error: only one processor found.\n");
		cpu_present_map=(1<<smp_processor_id());
	}
	else
	{
		unsigned long bogosum=0;
		for(i=0;i<32;i++)
		{
			if(cpu_present_map&(1<<i))
				bogosum+=cpu_data[i].udelay_val;
		}
		printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
			cpucount+1,
			(bogosum+2500)/500000,
			((bogosum+2500)/5000)%100);
		smp_activated=1;
		smp_num_cpus=cpucount+1;
	}
}
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
/*
 * Send a cross-CPU message as an APIC IPI.
 *
 * target: a CPU number, or MSG_ALL / MSG_ALL_BUT_SELF broadcasts.
 * msg:    message id (MSG_RESCHEDULE goes on its own vector 0x30 and
 *         carries no payload; everything else uses vector 0x2d and the
 *         shared smp_msg_* slots).
 * wait:   0 = fire and forget, 1 = spin until all targets acknowledge
 *         via cpu_callin_map, 2 = spin until all TLB flushes are done.
 */
void smp_message_pass(int target, int msg, unsigned long data, int wait)
{
	unsigned long cfg;
	unsigned long target_map;
	int p=smp_processor_id();
	int irq=0x2d;		/* Default cross-CPU message vector */
	int ct=0;
	static volatile int message_cpu = NO_PROC_ID;

	/* Nobody to talk to until the other CPUs are running. */
	if(!smp_activated || !smp_commenced)
		return;

	/* Reschedule requests are best-effort: use their own vector and
	   drop them if this CPU is already mid-message. */
	if(msg==MSG_RESCHEDULE)
	{
		irq=0x30;
		if(smp_cpu_in_msg[p])
			return;
	}

	/* Only one message may be in flight system-wide; MSG_STOP_CPU is
	   allowed to barge in since it is a last-resort halt. */
	if(message_cpu!=NO_PROC_ID && msg!=MSG_STOP_CPU)
	{
		panic("CPU #%d: Message pass %d but pass in progress by %d of %d\n",
			smp_processor_id(),msg,message_cpu, smp_msg_id);
	}
	message_cpu=smp_processor_id();

	/* Mark this CPU as busy passing a message. */
	smp_cpu_in_msg[p]++;

	/* Fill the shared slots (reschedule carries no payload). */
	if(msg!=MSG_RESCHEDULE)
	{
		smp_src_cpu=p;
		smp_msg_id=msg;
		smp_msg_data=data;
	}

	/* Wait up to ~10ms for any previous IPI to finish delivery;
	   ICR bit 12 is the delivery-pending flag. */
	while(ct<1000)
	{
		cfg=apic_read(APIC_ICR);
		if(!(cfg&(1<<12)))
			break;
		ct++;
		udelay(10);
	}

	/* Carry on regardless -- the write below overwrites the ICR. */
	if(ct==1000)
		printk("CPU #%d: previous IPI still not cleared after 10mS", smp_processor_id());

	/* Program the destination and the fixed-delivery command. */
	cfg=apic_read(APIC_ICR2);
	cfg&=0x00FFFFFF;
	apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(target));
	cfg=apic_read(APIC_ICR);
	cfg&=~0xFDFFF;
	cfg|=APIC_DEST_FIELD|APIC_DEST_DM_FIXED|irq;

	/* Translate broadcast pseudo-targets into APIC shorthand bits and
	   decide which acknowledgement bits to expect back. */
	if(target==MSG_ALL_BUT_SELF)
	{
		cfg|=APIC_DEST_ALLBUT;
		target_map=cpu_present_map;
		cpu_callin_map[0]=(1<<smp_src_cpu);
	}
	else if(target==MSG_ALL)
	{
		cfg|=APIC_DEST_ALLINC;
		target_map=cpu_present_map;
		cpu_callin_map[0]=0;
	}
	else
	{
		target_map=(1<<target);
		cpu_callin_map[0]=0;
	}

	/* Fire the IPI. */
	apic_write(APIC_ICR, cfg);

	/* Optionally spin for completion (see 'wait' above). */
	switch(wait)
	{
		case 1:
			while(cpu_callin_map[0]!=target_map);
			break;
		case 2:
			while(smp_invalidate_needed);
			break;
	}

	/* Release the message machinery. */
	smp_cpu_in_msg[p]--;
	message_cpu=NO_PROC_ID;
}
931
932
933
934
935
936
937
/*
 * Flush the TLB on every processor.  May only be called by the CPU
 * that currently owns the kernel (active_kernel_processor); panics
 * otherwise once SMP is active.
 */
void smp_invalidate(void)
{
	unsigned long flags;

	if(smp_activated && smp_processor_id()!=active_kernel_processor)
		panic("CPU #%d:Attempted invalidate IPI when not AKP(=%d)\n",smp_processor_id(),active_kernel_processor);

	/* Mark every other present CPU as owing a flush; each one clears
	   its own bit in smp_message_irq() after flushing. */
	smp_invalidate_needed=cpu_present_map&~(1<<smp_processor_id());

	/* Send the IPI with interrupts disabled; wait==2 spins until
	   smp_invalidate_needed drains to zero. */
	save_flags(flags);
	cli();
	smp_message_pass(MSG_ALL_BUT_SELF, MSG_INVALIDATE_TLB, 0L, 2);

	/* Flush our own TLB as well. */
	local_invalidate();

	restore_flags(flags);

}
977
978
979
980
981
/*
 * Per-CPU reschedule/tick interrupt handler.  Performs the per-tick
 * accounting normally done by the timer interrupt (user/system time,
 * interval timers, CPU resource limits, profiling) and requests a
 * reschedule when the current task's quantum runs out.
 */
void smp_reschedule_irq(int cpl, struct pt_regs *regs)
{
#ifdef DEBUGGING_SMP_RESCHED
	static int ct=0;
	if(ct==0)
	{
		printk("Beginning scheduling on CPU#%d\n",smp_processor_id());
		ct=1;
	}
#endif
	if(smp_processor_id()!=active_kernel_processor)
		panic("SMP Reschedule on CPU #%d, but #%d is active.\n",
			smp_processor_id(), active_kernel_processor);

	/* Account one tick of user or system time. */
	if (user_mode(regs))
	{
		current->utime++;
		if (current->pid)
		{
			/* Priority below the default counts as nice time. */
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* Tick the virtual interval timer; reload and signal on expiry. */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current->pid)
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* Kernel profiling: bump the bucket for the interrupted EIP. */
		if (prof_buffer && current->pid) {
			extern int _stext;
			unsigned long eip = regs->eip - (unsigned long) &_stext;
			eip >>= CONFIG_PROFILE_SHIFT;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}

	/* Enforce the CPU-time resource limits: SIGKILL at the hard
	   limit, SIGXCPU at the soft limit and every 5s beyond it. */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
		(((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
		(((current->stime + current->utime) % HZ) == 0)) {
		unsigned long psecs = (current->stime + current->utime) / HZ;

		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);

		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
			((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}

	/* Tick the profiling interval timer. */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}

	/* Quantum exhausted (or this is the idle task, pid 0): ask the
	   return path to reschedule. */
	if ( 0 > --current->counter || current->pid == 0)
	{
		current->counter = 0;
		need_resched=1;
	}

	/* Acknowledge the interrupt to the local APIC.  NOTE(review):
	   the preceding read presumably flushes pending APIC writes
	   before the EOI -- confirm. */
	apic_read(APIC_SPIV);
	apic_write(APIC_EOI, 0);
}
1078
1079
1080
1081
1082
/*
 * Handler for the cross-CPU message vector (0x2d).  Dispatches on the
 * shared smp_msg_id written by smp_message_pass().
 */
void smp_message_irq(int cpl, struct pt_regs *regs)
{
	int i=smp_processor_id();

	switch(smp_msg_id)
	{
		/* Stale/cleared message: nothing to do (note: no EOI on
		   this path). */
		case 0:
			return;

		/* Flush our TLB if our bit is still set, then acknowledge
		   via cpu_callin_map. */
		case MSG_INVALIDATE_TLB:
			if(clear_bit(i,&smp_invalidate_needed))
				local_invalidate();
			set_bit(i, &cpu_callin_map[0]);
			/* NOTE(review): this |= duplicates the set_bit just
			   above (i == smp_processor_id()). */
			cpu_callin_map[0]|=1<<smp_processor_id();
			break;

		/* Park this CPU permanently, halting if HLT is safe here. */
		case MSG_STOP_CPU:
			while(1)
			{
				if(cpu_data[smp_processor_id()].hlt_works_ok)
					__asm__("hlt");
			}
		default:
			printk("CPU #%d sent invalid cross CPU message to CPU #%d: %X(%lX).\n",
				smp_src_cpu,smp_processor_id(),smp_msg_id,smp_msg_data);
			break;
	}

	/* Acknowledge the interrupt to the local APIC. */
	apic_read(APIC_SPIV);
	apic_write(APIC_EOI, 0);
}