arch/sparc/kernel/smp.c

DEFINITIONS

This source file includes the following definitions.
  1. smp_commence
  2. smp_callin
  3. smp_boot_cpus
  4. smp_message_pass
  5. smp_invalidate
  6. smp_invalidate_all
  7. smp_invalidate_mm
  8. smp_invalidate_range
  9. smp_invalidate_page
  10. smp_reschedule_irq
  11. smp_message_irq

/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/sched.h>        /* current, send_sig() */
#include <linux/kernel_stat.h>  /* kstat */

#include <asm/system.h>         /* sti(), cli(), save_flags()/restore_flags() */
#include <asm/bitops.h>         /* set_bit(), clear_bit() */

int smp_num_cpus;
int smp_threads_ready;
volatile unsigned long smp_msg_data;
volatile int smp_src_cpu;
volatile int smp_msg_id;

static volatile int smp_commenced = 0;

/* The only locking primitive guaranteed to be available on all Sparc
 * processors is 'ldstub [%addr_reg + imm], %dest_reg', which atomically
 * loads the byte at the effective address into dest_reg and stores
 * 0xff there afterwards.  A pretty lame locking primitive compared to
 * the Alpha and the Intel, no?  Most Sparcs have a 'swap' instruction,
 * which is much better...
 */
klock_t kernel_lock;

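/* Illustrative sketch, not part of the original file: a minimal
 * spinlock built on ldstub.  ldstub atomically fetches the old byte
 * and leaves 0xff behind, so a zero return value means this cpu got
 * the lock.  The helper names here are hypothetical.
 */
static __inline__ void sparc_spin_lock(volatile unsigned char *lock)
{
        unsigned char val;

        do {
                __asm__ __volatile__("ldstub [%1], %0"
                                     : "=r" (val)
                                     : "r" (lock)
                                     : "memory");
        } while(val != 0);
}

static __inline__ void sparc_spin_unlock(volatile unsigned char *lock)
{
        *lock = 0;      /* an ordinary byte store releases the lock */
}
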
void smp_commence(void)
{
        /*
         *      Lets the callins below out of their loop.
         */
        smp_commenced = 1;
}

void smp_callin(void)
{
        int cpuid = smp_get_processor_id();

        /* XXX Clear the software interrupts _HERE_. */

        sti();
        calibrate_delay();
        smp_store_cpu_info(cpuid);
        set_bit(cpuid, (unsigned long *)&cpu_callin_map[0]);
        local_invalidate_all();
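        /* Spin until the boot cpu lets us continue via smp_commence(). */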
        while(!smp_commenced);
        if(cpu_number_map[cpuid] == -1)
                while(1);
        local_invalidate_all();
}

void smp_boot_cpus(void)
{
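        /* XXX Not implemented yet. */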
}

void smp_message_pass(int target, int msg, unsigned long data, int wait)
{
        unsigned long target_map;
        int p = smp_processor_id();
        static volatile int message_cpu = NO_PROC_ID;

        if(!smp_activated || !smp_commenced)
                return;

        if(msg == MSG_RESCHEDULE) {
                if(smp_cpu_in_msg[p])
                        return;
        }

        if(message_cpu != NO_PROC_ID && msg != MSG_STOP_CPU) {
                panic("CPU #%d: Message pass %d but pass in progress by %d of %d\n",
                      smp_processor_id(), msg, message_cpu, smp_msg_id);
        }
        message_cpu = smp_processor_id();
        smp_cpu_in_msg[p]++;
        if(msg != MSG_RESCHEDULE) {
                smp_src_cpu = p;
                smp_msg_id = msg;
                smp_msg_data = data;
        }

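        /* For MSG_ALL_BUT_SELF the sender's bit is pre-set in
         * cpu_callin_map, so the wait loop below only waits for
         * the other cpus to check in.
         */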
        if(target == MSG_ALL_BUT_SELF) {
                target_map = cpu_present_map;
                cpu_callin_map[0] = (1<<smp_src_cpu);
        } else if(target == MSG_ALL) {
                target_map = cpu_present_map;
                cpu_callin_map[0] = 0;
        } else {
                target_map = (1<<target);
                cpu_callin_map[0] = 0;
        }

        /* XXX Send lvl15 soft interrupt to cpus here XXX */

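        /* wait == 1: spin until every target cpu has checked into
         * cpu_callin_map; wait == 2: spin until every target cpu
         * has cleared its bit in smp_invalidate_needed.
         */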
        switch(wait) {
        case 1:
                while(cpu_callin_map[0] != target_map);
                break;
        case 2:
                while(smp_invalidate_needed);
                break;
        }
        smp_cpu_in_msg[p]--;
        message_cpu = NO_PROC_ID;
}

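/* Illustrative only, not part of the original file: a caller such as
 * panic() could broadcast a stop to all other cpus, without waiting
 * for an acknowledgement, as:
 *
 *      smp_message_pass(MSG_ALL_BUT_SELF, MSG_STOP_CPU, 0L, 0);
 */
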
inline void smp_invalidate(int type, unsigned long a, unsigned long b, unsigned long c)
{
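        /* The type/a/b/c arguments are accepted for interface
         * compatibility but ignored for now: every cross call
         * triggers a full TLB flush on all cpus.
         */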
        unsigned long flags;

        smp_invalidate_needed = cpu_present_map & ~(1<<smp_processor_id());
        save_flags(flags); cli();
        smp_message_pass(MSG_ALL_BUT_SELF, MSG_INVALIDATE_TLB, 0L, 2);
        local_invalidate();
        restore_flags(flags);
}

void smp_invalidate_all(void)
{
        smp_invalidate(0, 0, 0, 0);
}

void smp_invalidate_mm(struct mm_struct *mm)
{
        smp_invalidate(1, (unsigned long) mm, 0, 0);
}

void smp_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
        smp_invalidate(2, (unsigned long) mm, start, end);
}

void smp_invalidate_page(struct vm_area_struct *vmap, unsigned long page)
{
        smp_invalidate(3, (unsigned long)vmap->vm_mm, page, 0);
}

void smp_reschedule_irq(int cpl, struct pt_regs *regs)
{
        if(smp_processor_id() != active_kernel_processor)
                panic("SMP Reschedule on CPU #%d, but #%d is active.\n",
                      smp_processor_id(), active_kernel_processor);
        if(user_mode(regs)) {
                current->utime++;
                if (current->pid) {
                        if (current->priority < 15)
                                kstat.cpu_nice++;
                        else
                                kstat.cpu_user++;
                }
                /* Update ITIMER_VIRT for current task if not in a system call */
                if (current->it_virt_value && !(--current->it_virt_value)) {
                        current->it_virt_value = current->it_virt_incr;
                        send_sig(SIGVTALRM,current,1);
                }
        } else {
                current->stime++;
                if(current->pid)
                        kstat.cpu_system++;
#ifdef CONFIG_PROFILE
                if (prof_buffer && current->pid) {
                        extern int _stext;
                        /* The i386 original sampled regs->eip; on Sparc
                         * the trapped program counter is regs->pc.
                         */
                        unsigned long pc = regs->pc - (unsigned long) &_stext;
                        pc >>= CONFIG_PROFILE_SHIFT;
                        if (pc < prof_len)
                                prof_buffer[pc]++;
                }
#endif
        }

        /*
         * check the cpu time limit on the process.
         */
        if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
            (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
                send_sig(SIGKILL, current, 1);
        if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
            (((current->stime + current->utime) % HZ) == 0)) {
                unsigned long psecs = (current->stime + current->utime) / HZ;
                /* send when equal */
                if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
                        send_sig(SIGXCPU, current, 1);
                /* and every five seconds thereafter. */
                else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
                         ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
                        send_sig(SIGXCPU, current, 1);
        }

        /* Update ITIMER_PROF for the current task */
        if (current->it_prof_value && !(--current->it_prof_value)) {
                current->it_prof_value = current->it_prof_incr;
                send_sig(SIGPROF,current,1);
        }

        if(0 > --current->counter || current->pid == 0) {
                current->counter = 0;
                need_resched = 1;
        }
}

void smp_message_irq(int cpl, struct pt_regs *regs)
{
        int i=smp_processor_id();
/*      static int n=0;
        if(n++<NR_CPUS)
                printk("IPI %d->%d(%d,%ld)\n",smp_src_cpu,i,smp_msg_id,smp_msg_data);*/
        switch(smp_msg_id)
        {
                case 0: /* IRQ 13 testing - boring */
                        return;

                /*
                 *      A TLB flush is needed.
                 */

                case MSG_INVALIDATE_TLB:
                        if(clear_bit(i,(unsigned long *)&smp_invalidate_needed))
                                local_invalidate();
                        set_bit(i, (unsigned long *)&cpu_callin_map[0]);
                        break;

                /*
                 *      Halt other CPUs for a panic or reboot.
                 */
                case MSG_STOP_CPU:
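                        /* XXX 'hlt' and the hlt_works_ok cpuinfo field
                         * XXX are i386 leftovers; Sparc has no halt
                         * XXX instruction, so this should simply spin.
                         */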
                        while(1)
                        {
                                if(cpu_data[smp_processor_id()].hlt_works_ok)
                                        __asm__("hlt");
                        }
                default:
                        printk("CPU #%d sent invalid cross CPU message to CPU #%d: %X(%lX).\n",
                                smp_src_cpu,smp_processor_id(),smp_msg_id,smp_msg_data);
                        break;
        }
        /*
         *      Clear the IPI so we can receive future IPIs.
         *      XXX apic_read()/apic_write() are i386 leftovers; the
         *      XXX Sparc soft interrupt acknowledge belongs here.
         */
        apic_read(APIC_SPIV);           /* Dummy read */
        apic_write(APIC_EOI, 0);        /* Docs say use 0 for future compatibility */
}
