This source file includes following definitions.
- smp_commence
- smp_callin
- smp_boot_cpus
- smp_message_pass
- smp_invalidate
- smp_invalidate_all
- smp_invalidate_mm
- smp_invalidate_range
- smp_invalidate_page
- smp_reschedule_irq
- smp_message_irq
1
2
3
4
5
6 #include <linux/kernel.h>
7 #include <linux/smp.h>
8
9 int smp_num_cpus;
10 int smp_threads_ready;
11 volatile unsigned long smp_msg_data;
12 volatile int smp_src_cpu;
13 volatile int smp_msg_id;
14
15 static volatile int smp_commenced = 0;
16
17
18
19
20
21
22
23
24 klock_t kernel_lock;
25
26 void smp_commence(void)
27 {
28
29
30
31 smp_commenced = 1;
32 }
33
/*
 * Per-CPU bring-up path for a secondary processor.  Calibrates its
 * delay loop, records its CPU data, checks in through cpu_callin_map,
 * then spins until the boot CPU releases everyone via smp_commence().
 */
void smp_callin(void)
{
	int cpuid = smp_get_processor_id();

	/* Interrupts must be on for the BogoMIPS calibration loop. */
	sti();
	calibrate_delay();
	smp_store_cpu_info(cpuid);
	/* Announce to the boot CPU that this processor is alive. */
	set_bit(cpuid, (unsigned long *)&cpu_callin_map[0]);
	local_invalidate_all();
	/* Busy-wait until smp_commence() sets smp_commenced. */
	while(!smp_commenced);
	/* A CPU the boot processor chose not to use parks itself forever. */
	if(cpu_number_map[cpuid] == -1)
		while(1);
	local_invalidate_all();
}
50
/*
 * Start the secondary processors.  Empty stub on this port —
 * presumably to be filled in when MP hardware start-up support
 * lands; TODO confirm against the architecture bring-up code.
 */
void smp_boot_cpus(void)
{
}
54
55 void smp_message_pass(int target, int msg, unsigned long data, int wait)
56 {
57 struct sparc_ipimsg *msg = (struct sparc_ipimsg *) data;
58 unsigned long target_map;
59 int p = smp_processor_id();
60 static volatile int message_cpu = NO_PROC_ID;
61
62 if(!smp_activated || !smp_commenced)
63 return;
64
65 if(msg == MSG_RESCHEDULE) {
66 if(smp_cpu_in_msg[p])
67 return;
68 }
69
70 if(message_cpu != NO_PROC_ID && msg != MSG_STOP_CPU) {
71 panic("CPU #%d: Message pass %d but pass in progress by %d of %d\n",
72 smp_processor_id(),msg,message_cpu, smp_msg_id);
73 }
74 message_cpu = smp_processor_id();
75 smp_cpu_in_msg[p]++;
76 if(msg != MSG_RESCHEDULE) {
77 smp_src_cpu = p;
78 smp_msg_id = msg;
79 smp_msg_data = data;
80 }
81
82 if(target == MSG_ALL_BUT_SELF) {
83 target_map = cpu_present_map;
84 cpu_callin_map[0] = (1<<smp_src_cpu);
85 } else if(target == MSG_ALL) {
86 target_map = cpu_present_map;
87 cpu_callin_map[0] = 0;
88 } else {
89 target_map = (1<<target);
90 cpu_callin_map[0] = 0;
91 }
92
93
94
95 switch(wait) {
96 case 1:
97 while(cpu_callin_map[0] != target_map);
98 break;
99 case 2:
100 while(smp_invalidate_needed);
101 break;
102 }
103 smp_cpu_in_msg[p]--;
104 message_cpu = NO_PROC_ID;
105 }
106
/*
 * Flush the TLB on every CPU.  The type/a/b/c arguments describe the
 * requested granularity (all / mm / range / page) but are currently
 * ignored — every caller gets a full cross-CPU flush.
 *
 * Flags each other present CPU in smp_invalidate_needed, broadcasts
 * MSG_INVALIDATE_TLB (wait==2 spins until every target clears its bit
 * in its IRQ handler), then flushes the local TLB.
 */
inline void smp_invalidate(int type, unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long flags;

	/* Mark every present CPU except ourselves as needing a flush. */
	smp_invalidate_needed = cpu_present_map & ~(1<<smp_processor_id());
	/* Interrupts off for the duration of the message round-trip. */
	save_flags(flags); cli();
	smp_message_pass(MSG_ALL_BUT_SELF, MSG_INVALIDATE_TLB, 0L, 2);
	local_invalidate();
	restore_flags(flags);
}
117
/* Flush the entire TLB on every processor. */
void smp_invalidate_all(void)
{
	smp_invalidate(0, 0, 0, 0);
}
122
/* Flush all TLB entries for one address space, on every processor. */
void smp_invalidate_mm(struct mm_struct *mm)
{
	smp_invalidate(1, (unsigned long)mm, 0, 0);
}
127
/* Flush the TLB entries covering [start, end) of an address space,
 * on every processor. */
void smp_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	smp_invalidate(2, (unsigned long)mm, start, end);
}
132
133 void smp_invalidate_page(struct vm_area_struct *vmap, unsigned long page)
134 {
135 smp_invalidate(3, (unsigned long)vmap->vm_mm, page, 0);
136 }
137
/*
 * Reschedule IPI handler: does the per-tick process accounting
 * (utime/stime, interval timers, CPU rlimits) for the current task
 * and requests a reschedule when its timeslice is exhausted.
 *
 * NOTE(review): regs->eip and the assumption that only the CPU holding
 * the big kernel lock receives this look inherited from the i386
 * version — confirm they are right for this port.
 */
void smp_reschedule_irq(int cpl, struct pt_regs *regs)
{
	/* Only the processor currently inside the kernel may do this. */
	if(smp_processor_id() != active_kernel_processor)
		panic("SMP Reschedule on CPU #%d, but #%d is active.\n",
			smp_processor_id(), active_kernel_processor);
	if(user_mode(regs)) {
		/* Tick spent in user mode. */
		current->utime++;
		if (current->pid) {
			/* priority < 15 counts as "nice" time here. */
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}

		/* Virtual interval timer: fires only while in user mode. */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		/* Tick spent in the kernel (pid 0 = idle is not counted). */
		current->stime++;
		if(current->pid)
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* Kernel profiling: bucket the interrupted PC. */
		if (prof_buffer && current->pid) {
			extern int _stext;
			unsigned long eip = regs->eip - (unsigned long) &_stext;
			eip >>= CONFIG_PROFILE_SHIFT;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}

	/* Hard CPU-time limit: kill outright once rlim_max is exceeded. */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
	    (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	/* Soft limit: on each whole-second boundary, SIGXCPU when the soft
	 * limit is first reached, then again every 5 seconds past it. */
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
	    (((current->stime + current->utime) % HZ) == 0)) {
		unsigned long psecs = (current->stime + current->utime) / HZ;

		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);

		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
		         ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}

	/* Profiling interval timer: ticks in both user and kernel mode. */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}

	/* Timeslice used up (or idle task): ask for a reschedule. */
	if(0 > --current->counter || current->pid == 0) {
		current->counter = 0;
		need_resched = 1;
	}
}
200
201 void smp_message_irq(int cpl, struct pt_regs *regs)
202 {
203 int i=smp_processor_id();
204
205
206
207 switch(smp_msg_id)
208 {
209 case 0:
210 return;
211
212
213
214
215
216 case MSG_INVALIDATE_TLB:
217 if(clear_bit(i,(unsigned long *)&smp_invalidate_needed))
218 local_invalidate();
219 set_bit(i, (unsigned long *)&cpu_callin_map[0]);
220 cpu_callin_map[0]|=1<<smp_processor_id();
221 break;
222
223
224
225
226 case MSG_STOP_CPU:
227 while(1)
228 {
229 if(cpu_data[smp_processor_id()].hlt_works_ok)
230 __asm__("hlt");
231 }
232 default:
233 printk("CPU #%d sent invalid cross CPU message to CPU #%d: %X(%lX).\n",
234 smp_src_cpu,smp_processor_id(),smp_msg_id,smp_msg_data);
235 break;
236 }
237
238
239
240
241 apic_read(APIC_SPIV);
242 apic_write(APIC_EOI, 0);
243 }