root/arch/i386/kernel/smp.c


DEFINITIONS

This source file includes the following definitions.
  1. mpf_checksum
  2. mpc_family
  3. smp_read_mpc
  4. smp_scan_config
  5. install_trampoline
  6. smp_alloc_memory
  7. get_kernel_stack
  8. smp_store_cpu_info
  9. smp_commence
  10. smp_callin
  11. smp_boot_cpus
  12. smp_message_pass
  13. smp_invalidate
  14. smp_reschedule_irq
  15. smp_message_irq

   1 /*
   2  *      Intel MP v1.1/v1.4 specification support routines for multi-pentium 
   3  *      hosts.
   4  *
   5  *      (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
   6  *      Supported by Caldera http://www.caldera.com.
   7  *      Much of the core SMP work is based on previous work by Thomas Radke, to
   8  *      whom a great many thanks are extended.
   9  *
  10  *
  11  *      This code is released under the GNU public license version 2 or
  12  *      later.
  13  *
  14  *      Fixes
  15  *              Felix Koop      :       NR_CPUS used properly
  16  *              Jose Renau      :       Handle single CPU case.
  17  *              Alan Cox        :       By repeated request 8) - Total BogoMIP report.
  18  *              Greg Wright     :       Fix for kernel stacks panic.
  19  *              Erich Boleyn    :       MP v1.4 and additional changes.
  20  */
  21 
  22 #include <linux/kernel.h>
  23 #include <linux/string.h>
  24 #include <linux/config.h>
  25 #include <linux/timer.h>
  26 #include <linux/sched.h>
  27 #include <linux/mm.h>
  28 #include <linux/kernel_stat.h>
  29 #include <linux/delay.h>
  30 #include <asm/i82489.h>
  31 #include <linux/smp.h>
  32 #include <asm/pgtable.h>
  33 #include <asm/bitops.h>
  34 #include <asm/pgtable.h>
  35 #include <asm/smp.h>
  36 
  37 static int smp_found_config=0;                          /* Have we found an SMP box                             */
  38 
  39 unsigned long cpu_present_map = 0;                      /* Bitmask of existing CPU's                            */
  40 int smp_num_cpus;                                       /* Total count of live CPU's                            */
  41 int smp_threads_ready=0;                                /* Set when the idlers are all forked                   */
  42 volatile unsigned long cpu_number_map[NR_CPUS];         /* which CPU maps to which logical number               */
  43 volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};  /* We always use 0; the rest is ready for parallel delivery */
  44 volatile unsigned long smp_invalidate_needed;           /* Used for the invalidate map that's also checked in the spinlock */
  45 struct cpuinfo_x86 cpu_data[NR_CPUS];                   /* Per cpu bogomips and other parameters                */
  46 static unsigned int num_processors = 1;                 /* Internal processor count                             */
  47 static unsigned long io_apic_addr = 0;                  /* Address of the I/O apic (not yet used)               */
  48 unsigned char boot_cpu_id = 0;                          /* Processor that is doing the boot up                  */
  49 static unsigned char *kstack_base,*kstack_end;          /* Kernel stack list pointers                           */
  50 static int smp_activated = 0;                           /* Tripped once we need to start cross invalidating     */
  51 static volatile int smp_commenced=0;                    /* Tripped when we start scheduling                     */
  52 unsigned long apic_addr=0xFEE00000;                     /* Address of APIC (defaults to 0xFEE00000)             */
  53 unsigned long nlong = 0;                                /* dummy used for apic_reg address + 0x20               */
  54 unsigned char *apic_reg=((unsigned char *)(&nlong))-0x20;/* Later set to the vremap() of the APIC               */
  55 unsigned long apic_retval;                              /* Just debugging the assembler..                       */
  56 unsigned char *kernel_stacks[NR_CPUS];                  /* Kernel stack pointers for CPU's (debugging)          */
  57 
  58 static volatile unsigned char smp_cpu_in_msg[NR_CPUS];  /* True if this processor is sending an IPI             */
  59 static volatile unsigned long smp_msg_data;             /* IPI data pointer                                     */
  60 static volatile int smp_src_cpu;                        /* IPI sender processor                                 */
  61 static volatile int smp_msg_id;                         /* Message being sent                                   */
  62 
  63 volatile unsigned long kernel_flag=0;                   /* Kernel spinlock                                      */
  64 volatile unsigned char active_kernel_processor = NO_PROC_ID;    /* Processor holding kernel spinlock            */
  65 volatile unsigned long kernel_counter=0;                /* Number of times the processor holds the lock         */
  66 volatile unsigned long syscall_count=0;                 /* Number of times the processor holds the syscall lock */
  67 
  68 volatile unsigned long ipi_count;                       /* Number of IPI's delivered                            */
  69 #ifdef __SMP_PROF__
  70 volatile unsigned long smp_spins[NR_CPUS]={0};          /* Count interrupt spins                                */
  71 volatile unsigned long smp_spins_syscall[NR_CPUS]={0};  /* Count syscall spins                                  */
  72 volatile unsigned long smp_spins_syscall_cur[NR_CPUS]={0};/* Count spins for the actual syscall                 */
  73 volatile unsigned long smp_spins_sys_idle[NR_CPUS]={0}; /* Count spins for sys_idle                             */
  74 volatile unsigned long smp_idle_count[1+NR_CPUS]={0,};  /* Count idle ticks                                     */
  75 #endif
  76 #if defined (__SMP_PROF__)
  77 volatile unsigned long smp_idle_map=0;                  /* Map for idle processors                              */
  78 #endif
  79 
  80 /*#define SMP_DEBUG*/
  81 
  82 #ifdef SMP_DEBUG
  83 #define SMP_PRINTK(x)   printk x
  84 #else
  85 #define SMP_PRINTK(x)
  86 #endif
  87 
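/*
 *      Usage note: the macro takes its whole argument list as a single
 *      parenthesised argument, so call sites use doubled parentheses,
 *      e.g.
 *
 *              SMP_PRINTK(("CPU map: %lx\n", cpu_present_map));
 *
 *      With SMP_DEBUG left undefined the statement expands to nothing.
 */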
  88 
  89 /* 
  90  *      Checksum an MP configuration block.
  91  */
  92  
  93 static int mpf_checksum(unsigned char *mp, int len)
  94 {
  95         int sum=0;
  96         while(len--)
  97                 sum+=*mp++;
  98         return sum&0xFF;
  99 }
 100 
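/*
 *      A block is valid when the byte sum over its whole length wraps to
 *      zero, i.e. the BIOS chooses the checksum byte so mpf_checksum()
 *      returns 0.  A minimal sketch of the test applied later to the
 *      16 byte floating structure (hypothetical helper, for illustration):
 */
#if 0
static int mpf_block_valid(struct intel_mp_floating *mpf)
{
        /* mpf_length counts 16 byte paragraphs, so 1 means 16 bytes */
        return mpf->mpf_length == 1 &&
               mpf_checksum((unsigned char *)mpf, 16) == 0;
}
#endif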
 101 /*
 102  *      Processor encoding in an MP configuration block
 103  */
 104  
 105 static char *mpc_family(int family,int model)
 106 {
 107         static char n[32];
 108         static char *model_defs[]=
 109         {
 110                 "80486DX","80486DX",
 111                 "80486SX","80486DX/2 or 80487",
 112                 "80486SL","Intel5X2(tm)",
 113                 "Unknown","Unknown",
 114                 "80486DX/4"
 115         };
 116         if(family==0x5)
 117                 return("Pentium(tm)");
 118         if(family==0x0F && model==0x0F)
 119                 return("Special controller");
 120         if(family==0x04 && model<9)
 121                 return model_defs[model];
 122         sprintf(n,"Unknown CPU [%d:%d]",family, model);
 123         return n;
 124 }
 125 
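/*
 *      The caller below derives family and model from the CPUID signature
 *      kept in mpc_cpufeature: family = (sig & CPU_FAMILY_MASK) >> 8 and
 *      model = (sig & CPU_MODEL_MASK) >> 4.  For example, a signature of
 *      0x0525 gives family 5, model 2, and mpc_family(5, 2) returns
 *      "Pentium(tm)"; 0x0435 gives family 4, model 3, which maps to
 *      "80486DX/2 or 80487" in model_defs[].
 */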
 126 /*
 127  *      Read the MPC
 128  */
 129 
 130 static int smp_read_mpc(struct mp_config_table *mpc)
 131 {
 132         char str[16];
 133         int count=sizeof(*mpc);
 134         int apics=0;
 135         unsigned char *mpt=((unsigned char *)mpc)+count;
 136 
 137         if(memcmp(mpc->mpc_signature,MPC_SIGNATURE,4))
 138         {
 139                 printk("Bad signature [%c%c%c%c].\n",
 140                         mpc->mpc_signature[0],
 141                         mpc->mpc_signature[1],
 142                         mpc->mpc_signature[2],
 143                         mpc->mpc_signature[3]);
 144                 return 1;
 145         }
 146         if(mpf_checksum((unsigned char *)mpc,mpc->mpc_length))
 147         {
 148                 printk("Checksum error.\n");
 149                 return 1;
 150         }
 151         if(mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04)
 152         {
 153                 printk("Bad Config Table version (%d)!!\n",mpc->mpc_spec);
 154                 return 1;
 155         }
 156         memcpy(str,mpc->mpc_oem,8);
 157         str[8]=0;
 158         printk("OEM ID: %s ",str);
 159         memcpy(str,mpc->mpc_productid,12);
 160         str[12]=0;
 161         printk("Product ID: %s ",str);
 162         printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
 163 
 164         /* set the local APIC address */
 165         apic_addr = mpc->mpc_lapic;
 166         
 167         /*
 168          *      Now process the configuration blocks.
 169          */
 170          
 171         while(count<mpc->mpc_length)
 172         {
 173                 switch(*mpt)
 174                 {
 175                         case MP_PROCESSOR:
 176                         {
 177                                 struct mpc_config_processor *m=
 178                                         (struct mpc_config_processor *)mpt;
 179                                 if(m->mpc_cpuflag&CPU_ENABLED)
 180                                 {
 181                                         printk("Processor #%d %s APIC version %d\n",
 182                                                 m->mpc_apicid, 
 183                                                 mpc_family((m->mpc_cpufeature&
 184                                                         CPU_FAMILY_MASK)>>8,
 185                                                         (m->mpc_cpufeature&
 186                                                                 CPU_MODEL_MASK)>>4),
 187                                                 m->mpc_apicver);
 188 #ifdef SMP_DEBUG                                                
 189                                         if(m->mpc_featureflag&(1<<0))
 190                                                 printk("    Floating point unit present.\n");
 191                                         if(m->mpc_featureflag&(1<<7))
 192                                                 printk("    Machine Exception supported.\n");
 193                                         if(m->mpc_featureflag&(1<<8))
 194                                                 printk("    64 bit compare & exchange supported.\n");
 195                                         if(m->mpc_featureflag&(1<<9))
 196                                                 printk("    Internal APIC present.\n");
 197 #endif                                          
 198                                         if(m->mpc_cpuflag&CPU_BOOTPROCESSOR)
 199                                         {
 200                                                 SMP_PRINTK(("    Bootup CPU\n"));
 201                                                 boot_cpu_id=m->mpc_apicid;
 202                                                 nlong = boot_cpu_id<<24;        /* Dummy 'self' for bootup */
 203                                         }
 204                                         else    /* Boot CPU already counted */
 205                                                 num_processors++;
 206                                                 
  207                                         if(m->mpc_apicid>=NR_CPUS)
 208                                                 printk("Processor #%d unused. (Max %d processors).\n",m->mpc_apicid, NR_CPUS);
 209                                         else                                            
 210                                                 cpu_present_map|=(1<<m->mpc_apicid);
 211                                 }
 212                                 mpt+=sizeof(*m);
 213                                 count+=sizeof(*m);
 214                                 break;
 215                         }
 216                         case MP_BUS:
 217                         {
 218                                 struct mpc_config_bus *m=
 219                                         (struct mpc_config_bus *)mpt;
 220                                 memcpy(str,m->mpc_bustype,6);
 221                                 str[6]=0;
 222                                 SMP_PRINTK(("Bus #%d is %s\n",
 223                                         m->mpc_busid,
 224                                         str));
 225                                 mpt+=sizeof(*m);
 226                                 count+=sizeof(*m);
 227                                 break; 
 228                         }
 229                         case MP_IOAPIC:
 230                         {
 231                                 struct mpc_config_ioapic *m=
 232                                         (struct mpc_config_ioapic *)mpt;
 233                                 if(m->mpc_flags&MPC_APIC_USABLE)
 234                                 {
 235                                         apics++;
 236                                         printk("I/O APIC #%d Version %d at 0x%lX.\n",
 237                                                 m->mpc_apicid,m->mpc_apicver,
 238                                                 m->mpc_apicaddr);
 239                                         io_apic_addr = m->mpc_apicaddr;
 240                                 }
 241                                 mpt+=sizeof(*m);
 242                                 count+=sizeof(*m); 
 243                                 break;
 244                         }
 245                         case MP_INTSRC:
 246                         {
 247                                 struct mpc_config_intsrc *m=
 248                                         (struct mpc_config_intsrc *)mpt;
 249                                 
 250                                 mpt+=sizeof(*m);
 251                                 count+=sizeof(*m);
 252                                 break;
 253                         }
 254                         case MP_LINTSRC:
 255                         {
 256                                 struct mpc_config_intlocal *m=
 257                                         (struct mpc_config_intlocal *)mpt;
 258                                 mpt+=sizeof(*m);
 259                                 count+=sizeof(*m);
 260                                 break;
 261                         }
 262                 }
 263         }
 264         if(apics>1)
 265                 printk("Warning: Multiple APIC's not supported.\n");
 266         return num_processors;                          
 267 }
 268 
 269 /*
 270  *      Scan the memory blocks for an SMP configuration block.
 271  */
 272  
 273 void smp_scan_config(unsigned long base, unsigned long length)
 274 {
 275         unsigned long *bp=(unsigned long *)base;
 276         struct intel_mp_floating *mpf;
 277         
 278         SMP_PRINTK(("Scan SMP from %p for %ld bytes.\n",
 279                 bp,length));
 280         if(sizeof(*mpf)!=16)
 281                 printk("Error: MPF size\n");
 282         
 283         while(length>0)
 284         {
 285                 if(*bp==SMP_MAGIC_IDENT)
 286                 {
 287                         mpf=(struct intel_mp_floating *)bp;
 288                         if(mpf->mpf_length==1 && 
 289                                 !mpf_checksum((unsigned char *)bp,16) &&
 290                                 (mpf->mpf_specification == 1
 291                                  || mpf->mpf_specification == 4) )
 292                         {
 293                                 printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
 294                                 if(mpf->mpf_feature2&(1<<7))
 295                                         printk("    IMCR and PIC compatibility mode.\n");
 296                                 else
 297                                         printk("    Virtual Wire compatibility mode.\n");
 298                                 smp_found_config=1;
 299                                 /*
 300                                  *      Now see if we need to read further.
 301                                  */
 302                                 if(mpf->mpf_feature1!=0)
 303                                 {
 304                                         num_processors=2;
 305                                         printk("I/O APIC at 0xFEC00000.\n");
 306                                         printk("Bus#0 is ");
 307                                 }
 308                                 switch(mpf->mpf_feature1)
 309                                 {
 310                                         case 1:
 311                                                 printk("ISA");
 312                                                 break;
 313                                         case 2:
 314                                                 printk("EISA with no IRQ8 chaining");
 315                                                 break;
 316                                         case 3:
 317                                                 printk("EISA");
 318                                                 break;
 319                                         case 4:
 320                                                 printk("MCA");
 321                                                 break;
 322                                         case 5:
 323                                                 printk("ISA\nBus#1 is PCI");
 324                                                 break;
 325                                         case 6:
 326                                                 printk("EISA\nBus #1 is PCI");
 327                                                 break;
 328                                         case 7:
 329                                                 printk("MCA\nBus #1 is PCI");
 330                                                 break;
 331                                         case 0:
 332                                                 break;
 333                                         default:
 334                                                 printk("???\nUnknown standard configuration %d\n",
 335                                                         mpf->mpf_feature1);
 336                                                 return;
 337                                 }
 338                                 /*
 339                                  *      Read the physical hardware table. If there isn't one
 340                                  *      the processors present are 0 and 1.
 341                                  */
 342                                 if(mpf->mpf_physptr)
 343                                         smp_read_mpc((void *)mpf->mpf_physptr);
 344                                 else
 345                                         cpu_present_map=3;
 346                                 printk("Processors: %d\n", num_processors);
 347                         }
 348                 }
 349                 bp+=4;
 350                 length-=16;
 351         }
 352 }
 353 
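/*
 *      Per the MP specification the floating structure may sit in the first
 *      kilobyte of the EBDA, in the last kilobyte of system base memory, or
 *      in the 64K of BIOS ROM space at 0xF0000.  A sketch of how a caller
 *      (the real call sites live outside this file) is expected to cover
 *      those ranges:
 */
#if 0
        smp_scan_config(0x0, 0x400);            /* Bottom 1K of conventional memory     */
        smp_scan_config(639 * 0x400, 0x400);    /* Top 1K of base RAM (EBDA area)       */
        smp_scan_config(0xF0000, 0x10000);      /* 64K of BIOS ROM space                */
#endif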
 354 /*
 355  *      Trampoline 80x86 program as an array.
 356  */
 357 
 358 static unsigned char trampoline_data[]={ 
 359 #include  "trampoline.hex"
 360 };
 361 
 362 /*
 363  *      Currently trivial. Write the real->protected mode
 364  *      bootstrap into the page concerned. The caller
  365  *      has made sure it's suitably aligned.
 366  */
 367  
 368 static void install_trampoline(unsigned char *mp)
 369 {
 370         memcpy(mp,trampoline_data,sizeof(trampoline_data));
 371 }
 372 
 373 /*
 374  *      We are called very early to get the low memory for the trampoline/kernel stacks
 375  *      This has to be done by mm/init.c to parcel us out nice low memory. We allocate
 376  *      the kernel stacks at 4K, 8K, 12K... currently (0-03FF is preserved for SMM and
 377  *      other things).
 378  */
 379  
 380 unsigned long smp_alloc_memory(unsigned long mem_base)
 381 {
 382         int size=(num_processors-1)*PAGE_SIZE;          /* Number of stacks needed */
 383         /*
 384          *      Our stacks have to be below the 1Mb line, and mem_base on entry
 385          *      is 4K aligned.
 386          */
 387          
 388         if(mem_base+size>=0x9F000)
 389                 panic("smp_alloc_memory: Insufficient low memory for kernel stacks.\n");
 390         kstack_base=(void *)mem_base;
 391         mem_base+=size;
 392         kstack_end=(void *)mem_base;
 393         return mem_base;
 394 }
 395         
 396 /*
 397  *      Hand out stacks one at a time.
 398  */
 399  
 400 static void *get_kernel_stack(void)
 401 {
 402         void *stack=kstack_base;
 403         if(kstack_base>=kstack_end)
 404                 return NULL;
 405         kstack_base+=PAGE_SIZE;
 406         return stack;
 407 }
 408 
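/*
 *      Worked example: with a 4K PAGE_SIZE and num_processors == 4,
 *      smp_alloc_memory() reserves three pages of low memory (panicking if
 *      that would reach 0x9F000) and the three get_kernel_stack() calls made
 *      from smp_boot_cpus() hand back mem_base, mem_base + 4K and
 *      mem_base + 8K in turn; a fourth call would return NULL.
 */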
 409 
 410 /*
 411  *      The bootstrap kernel entry code has set these up. Save them for
 412  *      a given CPU
 413  */
 414  
 415 void smp_store_cpu_info(int id)
 416 {
 417         struct cpuinfo_x86 *c=&cpu_data[id];
 418         c->hard_math=hard_math;                 /* Always assumed same currently */
 419         c->x86=x86;
 420         c->x86_model=x86_model;
 421         c->x86_mask=x86_mask;
 422         c->x86_capability=x86_capability;
 423         c->fdiv_bug=fdiv_bug;
 424         c->wp_works_ok=wp_works_ok;             /* Always assumed the same currently */
 425         c->hlt_works_ok=hlt_works_ok;
 426         c->udelay_val=loops_per_sec;
 427         strcpy(c->x86_vendor_id, x86_vendor_id);
 428 }
 429 
 430 /*
 431  *      Architecture specific routine called by the kernel just before init is
 432  *      fired off. This allows the BP to have everything in order [we hope].
 433  *      At the end of this all the AP's will hit the system scheduling and off
 434  *      we go. Each AP will load the system gdt's and jump through the kernel
 435  *      init into idle(). At this point the scheduler will one day take over 
 436  *      and give them jobs to do. smp_callin is a standard routine
 437  *      we use to track CPU's as they power up.
 438  */
 439 
 440 void smp_commence(void)
 441 {
 442         /*
 443          *      Lets the callin's below out of their loop.
 444          */
 445         smp_commenced=1;
 446 }
 447  
 448 void smp_callin(void)
 449 {
 450         int cpuid=GET_APIC_ID(apic_read(APIC_ID));
 451         unsigned long l;
 452         /*
 453          *      Activate our APIC
 454          */
 455          
 456         SMP_PRINTK(("CALLIN %d\n",smp_processor_id()));
 457         l=apic_read(APIC_SPIV);
 458         l|=(1<<8);              /* Enable */
 459         apic_write(APIC_SPIV,l);
 460         sti();
 461         /*
 462          *      Get our bogomips.
 463          */     
 464         calibrate_delay();
 465         /*
 466          *      Save our processor parameters
 467          */
 468         smp_store_cpu_info(cpuid);
 469         /*
 470          *      Allow the master to continue.
 471          */     
 472         set_bit(cpuid, &cpu_callin_map[0]);
 473         /*
 474          *      Until we are ready for SMP scheduling
 475          */
 476         load_ldt(0);
 477 /*      printk("Testing faulting...\n");
 478         *(long *)0=1;            OOPS... */
 479         local_invalidate();
 480         while(!smp_commenced);
 481         if (cpu_number_map[cpuid] == -1)
 482                 while(1);
 483         local_invalidate();
 484         SMP_PRINTK(("Commenced..\n"));
 485         
 486         load_TR(cpu_number_map[cpuid]);
 487 /*      while(1);*/
 488 }
 489 
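/*
 *      The handshake with smp_boot_cpus() and smp_commence(), in order
 *      (a summary of the surrounding code, not additional logic):
 *
 *              AP:  set_bit(cpuid, &cpu_callin_map[0]);  - BSP sees the callin
 *              BSP: cpu_number_map[i] = cpucount;        - logical number assigned
 *              BSP: smp_commence();                      - sets smp_commenced
 *              AP:  leaves while(!smp_commenced) and loads its TR
 */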
 490 /*
 491  *      Cycle through the processors sending pentium IPI's to boot each.
 492  */
 493  
 494 void smp_boot_cpus(void)
 495 {
 496         int i=0;
 497         int cpucount=0;
 498         void *stack;
 499         extern unsigned long init_user_stack[];
 500         
 501         /*
 502          *      Map the local APIC into kernel space
 503          */
 504 
 505         /* Mapping on non-Intel conforming platforms is a bad move. */
 506         if (1<cpu_present_map)  
 507                 apic_reg = vremap(0xFEE00000,4096);
 508         
 509         
 510         if(apic_reg == NULL)
 511                 panic("Unable to map local apic.\n");
 512                 
 513 #ifdef SMP_DEBUG                
 514         {
 515                 int reg;
 516 
 517                 reg = apic_read(APIC_VERSION);
 518                 printk("Getting VERSION: %x\n", reg);
 519 
 520                 apic_write(APIC_VERSION, 0);
 521                 reg = apic_read(APIC_VERSION);
 522                 printk("Getting VERSION: %x\n", reg);
 523 
 524                 reg = apic_read(APIC_LVT0);
 525                 printk("Getting LVT0: %x\n", reg);
 526 
 527                 reg = apic_read(APIC_LVT1);
 528                 printk("Getting LVT1: %x\n", reg);
 529         }
 530 #endif
 531         
 532         /*
 533          *      Now scan the cpu present map and fire up anything we find.
 534          */
 535          
 536         kernel_stacks[boot_cpu_id]=(void *)init_user_stack;     /* Set up for boot processor first */
 537 
 538         smp_store_cpu_info(boot_cpu_id);                        /* Final full version of the data */
 539         
 540         active_kernel_processor=boot_cpu_id;
 541 
 542         SMP_PRINTK(("CPU map: %lx\n", cpu_present_map));
 543                 
 544         for(i=0;i<NR_CPUS;i++)
 545         {
 546                 if((cpu_present_map&(1<<i)) && i!=boot_cpu_id)          /* Rebooting yourself is a bad move */
 547                 {
 548                         unsigned long cfg, send_status, accept_status;
 549                         int timeout;
 550                         
 551                         /*
 552                          *      We need a kernel stack for each processor.
 553                          */
 554                         
 555                         stack=get_kernel_stack();       /* We allocated these earlier */
 556                         if(stack==NULL)
 557                                 panic("No memory for processor stacks.\n");
 558                         kernel_stacks[i]=stack;
 559                         install_trampoline(stack);
 560 
  561                         printk("Booting processor %d stack %p: ",i,stack);                      /* So we set what's up   */
 562                                 
 563                         /*
 564                          *      Enable the local APIC
 565                          */
 566                          
 567                         cfg=apic_read(APIC_SPIV);
 568                         cfg|=(1<<8);            /* Enable APIC */
 569                         apic_write(APIC_SPIV,cfg);
 570                         
 571                         /*
 572                          *      This gunge runs the startup process for
 573                          *      the targeted processor.
 574                          */
 575 
 576 #ifdef EEK
 577                         SMP_PRINTK(("Setting warm reset code and vector.\n"));
 578 
 579                         CMOS_WRITE(0xa, 0xf);
 580                         *((volatile unsigned short *) 0x467) = (unsigned short)(stack>>4);
 581                         *((volatile unsigned short *) 0x469) = 0;
 582 #endif
 583 
 584                         apic_write(APIC_ESR, 0);
 585                         accept_status = (apic_read(APIC_ESR) & 0xEF);
 586                         send_status = 0;
 587                         accept_status = 0;
 588 
 589                         SMP_PRINTK(("Asserting INIT.\n"));
 590 
 591                         cfg=apic_read(APIC_ICR2);
 592                         cfg&=0x00FFFFFF;
 593                         apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));                      /* Target chip          */
 594                         cfg=apic_read(APIC_ICR);
 595                         cfg&=~0xCDFFF;                                                          /* Clear bits           */
 596                         cfg|=0x0000c500;        /* Urgh.. fix for constants */
 597                         apic_write(APIC_ICR, cfg);                                              /* Send IPI */
 598 
 599                         timeout = 0;
 600                         do {
 601                                 udelay(1000);
 602                                 if ((send_status = (!(apic_read(APIC_ICR) & 0x00001000))))
 603                                         break;
 604                         } while (timeout++ < 1000);
 605 
 606 #ifdef EEK2
 607                         if (send_status) {
 608                                 apic_write(APIC_ESR, 0);
 609                                 accept_status = (apic_read(APIC_ESR) & 0xEF);
 610                         }
 611 #endif
 612 
 613                         if (send_status && !accept_status)
 614                         {
 615                                 SMP_PRINTK(("Deasserting INIT.\n"));
 616                         
 617                                 cfg=apic_read(APIC_ICR2);
 618                                 cfg&=0x00FFFFFF;
 619                                 apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));                      /* Target chip          */
 620                                 cfg=apic_read(APIC_ICR);
 621                                 cfg&=~0xCDFFF;                                                          /* Clear bits           */
 622                                 cfg|=0x00008500;
 623                                 apic_write(APIC_ICR, cfg);                                              /* Send IPI */
 624 
 625                                 timeout = 0;
 626                                 do {
 627                                         udelay(1000);
 628                                         if ((send_status = !(apic_read(APIC_ICR) & 0x00001000) ))
 629                                                 break;
 630                                 } while (timeout++ < 1000);
 631 
 632                                 if (send_status) {
 633                                         udelay(1000000);
 634                                         apic_write(APIC_ESR, 0);
 635                                         accept_status = (apic_read(APIC_ESR) & 0xEF);
 636                                 }
 637                         }
 638 
 639                         /*
 640                          *      We currently assume an integrated
 641                          *      APIC only, so STARTUP IPIs must be
 642                          *      sent as well.
 643                          */
 644 
 645                         if (send_status && !accept_status)
 646                         {
 647                                 SMP_PRINTK(("Sending first STARTUP.\n"));
 648                         
 649                                 /*
 650                                  *      First STARTUP IPI
 651                                  */
 652 
 653                                 cfg=apic_read(APIC_ICR2);
 654                                 cfg&=0x00FFFFFF;
 655                                 apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));                      /* Target chip          */
 656                                 cfg=apic_read(APIC_ICR);
 657                                 cfg&=~0xCDFFF   ;                                                       /* Clear bits           */
 658                                 cfg|=APIC_DEST_FIELD|APIC_DEST_DM_STARTUP|(((unsigned long)stack)>>12); /* Boot on the stack    */              
 659                                 apic_write(APIC_ICR, cfg);                                              /* Kick the second      */
 660 
 661                                 timeout = 0;
 662                                 do {
 663                                         udelay(1000);
 664                                         if ((send_status = !(apic_read(APIC_ICR) & 0x00001000)) )
 665                                                 break;
 666                                 } while (timeout++ < 1000);
 667 
 668                                 if (send_status) {
 669                                         udelay(1000000);
 670                                         apic_write(APIC_ESR, 0);
 671                                         accept_status = (apic_read(APIC_ESR) & 0xEF);
 672                                 }
 673                         }
 674 
 675                         if (send_status && !accept_status)
 676                         {
 677                                 SMP_PRINTK(("Sending second STARTUP.\n"));
 678                         
 679                                 /*
 680                                  *      Second STARTUP IPI
 681                                  */
 682 
 683                                 cfg=apic_read(APIC_ICR2);
 684                                 cfg&=0x00FFFFFF;
 685                                 apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));                      /* Target chip          */
 686                                 cfg=apic_read(APIC_ICR);
 687                                 cfg&=~0xCDFFF   ;                                                       /* Clear bits           */
 688                                 cfg|=APIC_DEST_FIELD|APIC_DEST_DM_STARTUP|(((unsigned long)stack)>>12); /* Boot on the stack    */              
 689                                 apic_write(APIC_ICR, cfg);                                              /* Kick the second      */
 690 
 691                                 timeout = 0;
 692                                 do {
 693                                         udelay(1000);
 694                                         if ((send_status = !(apic_read(APIC_ICR) & 0x00001000)))
 695                                                 break;
 696                                 } while (timeout++ < 1000);
 697 
 698                                 if (send_status) {
 699                                         udelay(1000000);
 700                                         apic_write(APIC_ESR, 0);
 701                                         accept_status = (apic_read(APIC_ESR) & 0xEF);
 702                                 }
 703                         }
 704 
 705                         if (!send_status)               /* APIC never delivered?? */
 706                                 printk("APIC never delivered???\n");
 707                         else if (accept_status)         /* Send accept error */
 708                                 printk("APIC delivery error (%lx).\n", accept_status);
 709                         else
 710                         {
 711                                 for(timeout=0;timeout<50000;timeout++)
 712                                 {
 713                                         if(cpu_callin_map[0]&(1<<i))
 714                                                 break;                          /* It has booted */
 715                                         udelay(100);                            /* Wait 5s total for a response */
 716                                 }
 717                                 if(cpu_callin_map[0]&(1<<i))
 718                                 {
 719                                         cpucount++;
 720                                         /* number CPUs logically, starting from 1 (BSP is 0) */
 721                                         cpu_number_map[i] = cpucount;
 722                                 }
 723                                 else
 724                                 {
 725                                         if(*((volatile unsigned char *)8192)==0xA5)
 726                                                 printk("Stuck ??\n");
 727                                         else
 728                                                 printk("Not responding val=(%lx).\n", *((unsigned long *) stack));
 729                                         cpu_present_map&=~(1<<i);
 730                                         cpu_number_map[i] = -1;
 731                                 }
 732                         }
 733 
 734                         /* mark "stuck" area as not stuck */
 735                         *((volatile unsigned long *)8192) = 0;
 736                 }
 737                 else if (i == boot_cpu_id)
 738                 {
 739                         cpu_number_map[i] = 0;
 740                 }
 741                 else
 742                 {
 743                         cpu_number_map[i] = -1;
 744                 }
 745 
 746         }
 747         /*
 748          *      Allow the user to impress friends.
 749          */
 750         if(cpucount==0)
 751         {
 752                 printk("Error: only one processor found.\n");
 753                 cpu_present_map=(1<<smp_processor_id());
 754         }
 755         else
 756         {
 757                 unsigned long bogosum=0;
 758                 for(i=0;i<32;i++)
 759                 {
 760                         if(cpu_present_map&(1<<i))
 761                                 bogosum+=cpu_data[i].udelay_val;
 762                 }
 763                 printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n", 
 764                         cpucount+1, 
 765                         (bogosum+2500)/500000,
 766                         ((bogosum+2500)/5000)%100);
 767                 smp_activated=1;
 768                 smp_num_cpus=cpucount+1;
 769         }
 770 }
 771 
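/*
 *      Each phase of the boot sequence above (assert INIT, de-assert INIT,
 *      then the two STARTUP IPIs for integrated APICs) repeats the same
 *      pattern: program the target in ICR2, write the command to ICR and
 *      poll the delivery status bit.  A hypothetical helper, shown only to
 *      make that pattern explicit (cmd would be e.g. 0x0000c500 for the
 *      INIT assert, or APIC_DEST_FIELD|APIC_DEST_DM_STARTUP|(stack>>12)):
 */
#if 0
static int send_boot_ipi(int apicid, unsigned long cmd)
{
        unsigned long cfg;
        int timeout, sent = 0;

        cfg = apic_read(APIC_ICR2) & 0x00FFFFFF;
        apic_write(APIC_ICR2, cfg | SET_APIC_DEST_FIELD(apicid));      /* Target chip  */
        cfg = apic_read(APIC_ICR) & ~0xCDFFF;                          /* Clear bits   */
        apic_write(APIC_ICR, cfg | cmd);                               /* Send IPI     */

        for (timeout = 0; timeout < 1000; timeout++)
        {
                udelay(1000);
                if ((sent = !(apic_read(APIC_ICR) & 0x00001000)))      /* Bit 12 clear: delivered */
                        break;
        }
        return sent;
}
#endif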
 772 
 773 /*
 774  *      A non wait message cannot pass data or cpu source info. This current setup
 775  *      is only safe because the kernel lock owner is the only person who can send a message.
 776  *
 777  *      Wrapping this whole block in a spinlock is not the safe answer either. A processor may
 778  *      get stuck with irq's off waiting to send a message and thus not replying to the person
 779  *      spinning for a reply....
 780  *
 781  *      In the end invalidate ought to be the NMI and a very very short function (to avoid the old
 782  *      IDE disk problems), and other messages sent with IRQ's enabled in a civilised fashion. That
 783  *      will also boost performance.
 784  */
 785  
 786 void smp_message_pass(int target, int msg, unsigned long data, int wait)
 787 {
 788         unsigned long cfg;
 789         unsigned long target_map;
 790         int p=smp_processor_id();
 791         int irq=0x2d;                                                           /* IRQ 13 */
 792         int ct=0;
 793         static volatile int message_cpu = NO_PROC_ID;
 794 
 795         /*
 796          *      During boot up send no messages
 797          */
 798          
 799         if(!smp_activated || !smp_commenced)
 800                 return;
 801                 
 802         
 803         /*
 804          *      Skip the reschedule if we are waiting to clear a
 805          *      message at this time. The reschedule cannot wait
 806          *      but is not critical.
 807          */
 808         
 809         if(msg==MSG_RESCHEDULE)                                                 /* Reschedules we do via trap 0x30 */
 810         {
 811                 irq=0x30;
 812                 if(smp_cpu_in_msg[p])
 813                         return;
 814         }
 815 
 816         /*
 817          *      Sanity check we don't re-enter this across CPU's. Only the kernel
 818          *      lock holder may send messages. For a STOP_CPU we are bringing the
 819          *      entire box to the fastest halt we can.. 
 820          */
 821          
 822         if(message_cpu!=NO_PROC_ID && msg!=MSG_STOP_CPU)
 823         {
 824                 panic("CPU #%d: Message pass %d but pass in progress by %d of %d\n",
 825                         smp_processor_id(),msg,message_cpu, smp_msg_id);
 826         }
 827         message_cpu=smp_processor_id();
 828         
 829 
 830         /*
 831          *      We are busy
 832          */
 833                 
 834         smp_cpu_in_msg[p]++;
 835         
 836         /*
 837          *      Reschedule is currently special
 838          */
 839          
 840         if(msg!=MSG_RESCHEDULE)
 841         {
 842                 smp_src_cpu=p;
 843                 smp_msg_id=msg;
 844                 smp_msg_data=data;
 845         }
 846         
 847 /*      printk("SMP message pass #%d to %d of %d\n",
 848                 p, msg, target);*/
 849         
 850         /*
  851          *      Wait for the APIC to become ready - this should never occur. It's
 852          *      a debugging check really.
 853          */
 854          
 855         while(ct<1000)
 856         {
 857                 cfg=apic_read(APIC_ICR);
 858                 if(!(cfg&(1<<12)))
 859                         break;
 860                 ct++;
 861                 udelay(10);
 862         }
 863         
 864         /*
 865          *      Just pray... there is nothing more we can do
 866          */
 867          
 868         if(ct==1000)
 869                 printk("CPU #%d: previous IPI still not cleared after 10mS", smp_processor_id());
 870                 
 871         /*
 872          *      Program the APIC to deliver the IPI
 873          */
 874          
 875         cfg=apic_read(APIC_ICR2);
 876         cfg&=0x00FFFFFF;
 877         apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(target));                 /* Target chip                  */
 878         cfg=apic_read(APIC_ICR);
 879         cfg&=~0xFDFFF;                                                          /* Clear bits                   */
 880         cfg|=APIC_DEST_FIELD|APIC_DEST_DM_FIXED|irq;                            /* Send an IRQ 13               */              
 881 
 882         /*
 883          *      Set the target requirement
 884          */
 885          
 886         if(target==MSG_ALL_BUT_SELF)
 887         {
 888                 cfg|=APIC_DEST_ALLBUT;
 889                 target_map=cpu_present_map;
 890                 cpu_callin_map[0]=(1<<smp_src_cpu);
 891         }
 892         else if(target==MSG_ALL)
 893         {
 894                 cfg|=APIC_DEST_ALLINC;
 895                 target_map=cpu_present_map;
 896                 cpu_callin_map[0]=0;
 897         }
 898         else
 899         {
 900                 target_map=(1<<target);
 901                 cpu_callin_map[0]=0;
 902         }
 903                 
 904         /*
 905          *      Send the IPI. The write to APIC_ICR fires this off.
 906          */
 907          
 908         apic_write(APIC_ICR, cfg);      
 909         
 910         /*
 911          *      Spin waiting for completion
 912          */
 913          
 914         switch(wait)
 915         {
 916                 case 1:
 917                         while(cpu_callin_map[0]!=target_map);           /* Spin on the pass             */
 918                         break;
 919                 case 2:
 920                         while(smp_invalidate_needed);                   /* Wait for invalidate map to clear */
 921                         break;
 922         }
 923         
 924         /*
 925          *      Record our completion
 926          */
 927          
 928         smp_cpu_in_msg[p]--;
 929         message_cpu=NO_PROC_ID;
 930 }
 931 
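/*
 *      Typical invocations (illustrative; apart from smp_invalidate() below
 *      the real call sites live outside this file):
 */
#if 0
        smp_message_pass(MSG_ALL_BUT_SELF, MSG_RESCHEDULE, 0L, 0);     /* Kick the other CPU's, don't wait        */
        smp_message_pass(MSG_ALL_BUT_SELF, MSG_STOP_CPU, 0L, 0);       /* Panic/reboot: halt everyone else        */
        smp_message_pass(MSG_ALL_BUT_SELF, MSG_INVALIDATE_TLB, 0L, 2); /* Wait for smp_invalidate_needed to clear */
#endif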
 932 /*
 933  *      This is fraught with deadlocks. Linus does an invalidate at a whim
 934  *      even with IRQ's off. We have to avoid a pair of crossing invalidates
 935  *      or we are doomed.  See the notes about smp_message_pass.
 936  */
 937  
 938 void smp_invalidate(void)
 939 {
 940         unsigned long flags;
 941         if(smp_activated && smp_processor_id()!=active_kernel_processor)
 942                 panic("CPU #%d:Attempted invalidate IPI when not AKP(=%d)\n",smp_processor_id(),active_kernel_processor);
 943 /*      printk("SMI-");*/
 944         
 945         /*
  946          *      The assignment is safe because it's volatile so the compiler cannot reorder it,
 947          *      because the i586 has strict memory ordering and because only the kernel lock holder
 948          *      may issue an invalidate. If you break any one of those three change this to an atomic
 949          *      bus locked or.
 950          */
 951         
 952         smp_invalidate_needed=cpu_present_map&~(1<<smp_processor_id());
 953         
 954         /*
 955          *      Processors spinning on the lock will see this IRQ late. The smp_invalidate_needed map will
  956          *      ensure they don't do a spurious invalidate or miss one.
 957          */
 958          
 959         save_flags(flags);
 960         cli();
 961         smp_message_pass(MSG_ALL_BUT_SELF, MSG_INVALIDATE_TLB, 0L, 2);
 962         
 963         /*
 964          *      Flush the local TLB
 965          */
 966          
 967         local_invalidate();
 968         
 969         restore_flags(flags);
 970         
 971         /*
 972          *      Completed.
 973          */
 974          
 975 /*      printk("SMID\n");*/
 976 }
 977 
 978 /*      
 979  *      Reschedule call back
 980  */
 981 
 982 void smp_reschedule_irq(int cpl, struct pt_regs *regs)
 983 {
 984 #ifdef DEBUGGING_SMP_RESCHED
 985         static int ct=0;
 986         if(ct==0)
 987         {
 988                 printk("Beginning scheduling on CPU#%d\n",smp_processor_id());
 989                 ct=1;
 990         }
 991 #endif  
 992         if(smp_processor_id()!=active_kernel_processor)
 993                 panic("SMP Reschedule on CPU #%d, but #%d is active.\n",
 994                         smp_processor_id(), active_kernel_processor);
 995         /*
 996          *      Update resource usage on the slave timer tick.
 997          */
 998                         
 999         if (user_mode(regs)) 
1000         {
1001                 current->utime++;
1002                 if (current->pid) 
1003                 {
1004                         if (current->priority < 15)
1005                                 kstat.cpu_nice++;
1006                         else
1007                                 kstat.cpu_user++;
1008                 }
1009                 /* Update ITIMER_VIRT for current task if not in a system call */
1010                 if (current->it_virt_value && !(--current->it_virt_value)) {
1011                         current->it_virt_value = current->it_virt_incr;
1012                         send_sig(SIGVTALRM,current,1);
1013                 }
1014         } else {
1015                 current->stime++;
1016                 if(current->pid)
1017                         kstat.cpu_system++;
1018 #ifdef CONFIG_PROFILE
1019                 if (prof_buffer && current->pid) {
1020                         extern int _stext;
1021                         unsigned long eip = regs->eip - (unsigned long) &_stext;
1022                         eip >>= CONFIG_PROFILE_SHIFT;
1023                         if (eip < prof_len)
1024                                 prof_buffer[eip]++;
1025                 }
1026 #endif
1027         }
1028         /*
1029          * check the cpu time limit on the process.
1030          */
1031         if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
1032             (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
1033                 send_sig(SIGKILL, current, 1);
1034         if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
1035             (((current->stime + current->utime) % HZ) == 0)) {
1036                 unsigned long psecs = (current->stime + current->utime) / HZ;
1037                 /* send when equal */
1038                 if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
1039                         send_sig(SIGXCPU, current, 1);
1040                 /* and every five seconds thereafter. */
1041                 else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
1042                         ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
1043                         send_sig(SIGXCPU, current, 1);
1044         }
1045 
1046         /* Update ITIMER_PROF for the current task */
1047         if (current->it_prof_value && !(--current->it_prof_value)) {
1048                 current->it_prof_value = current->it_prof_incr;
1049                 send_sig(SIGPROF,current,1);
1050         }
1051 
1052 
1053         /*
1054          *      Don't reschedule if we are in an interrupt...
1055          *      [This is test code and not needed in the end]
1056          */
1057          
1058 /*      if(intr_count==1)
1059         {*/
1060 
1061                 /*
1062                  *      See if the slave processors need a schedule.
1063                  */
1064 
1065                 if ( 0 > --current->counter || current->pid == 0) 
1066                 {
1067                         current->counter = 0;
1068                         need_resched=1;
1069                 }
1070 /*      }*/
1071 
1072         /*
1073          *      Clear the IPI
1074          */
1075         apic_read(APIC_SPIV);           /* Dummy read */
1076         apic_write(APIC_EOI, 0);        /* Docs say use 0 for future compatibility */
1077 }       
1078 
1079 /*
1080  *      Message call back.
1081  */
1082  
1083 void smp_message_irq(int cpl, struct pt_regs *regs)
1084 {
1085         int i=smp_processor_id();
1086 /*      static int n=0;
1087         if(n++<NR_CPUS)
1088                 printk("IPI %d->%d(%d,%ld)\n",smp_src_cpu,i,smp_msg_id,smp_msg_data);*/
1089         switch(smp_msg_id)
1090         {
1091                 case 0: /* IRQ 13 testing - boring */
1092                         return;
1093                         
1094                 /*
1095                  *      A TLB flush is needed.
1096                  */
1097                  
1098                 case MSG_INVALIDATE_TLB:
1099                         if(clear_bit(i,&smp_invalidate_needed))
1100                                 local_invalidate();
1101                         set_bit(i, &cpu_callin_map[0]);
1102                         cpu_callin_map[0]|=1<<smp_processor_id();
1103                         break;
1104                         
1105                 /*
1106                  *      Halt other CPU's for a panic or reboot
1107                  */
1108                 case MSG_STOP_CPU:
1109                         while(1)
1110                         {
1111                                 if(cpu_data[smp_processor_id()].hlt_works_ok)
1112                                         __asm__("hlt");
1113                         }
1114                 default:
1115                         printk("CPU #%d sent invalid cross CPU message to CPU #%d: %X(%lX).\n",
1116                                 smp_src_cpu,smp_processor_id(),smp_msg_id,smp_msg_data);
1117                         break;
1118         }
1119         /*
1120          *      Clear the IPI, so we can receive future IPI's
1121          */
1122          
1123         apic_read(APIC_SPIV);           /* Dummy read */
1124         apic_write(APIC_EOI, 0);        /* Docs say use 0 for future compatibility */
1125 }
