This source file includes following definitions.
- setipl
- getipl
- swpipl
- xchg_u32
- __xchg
1
2 #ifndef __SPARC_SYSTEM_H
3 #define __SPARC_SYSTEM_H
4
5 #include <linux/kernel.h>
6
7 #include <asm/segment.h>
8
9 #ifdef __KERNEL__
10 #include <asm/page.h>
11 #include <asm/oplib.h>
12 #include <asm/psr.h>
13 #endif
14
/* Fallback page-table pointers: both expand to the address of a shared
 * "bad" page / page table (declared further down in this header) that is
 * substituted when no real mapping exists. */
15 #define EMPTY_PGT (&empty_bad_page)
16 #define EMPTY_PGE (&empty_bad_page_table)
17
18 #ifndef __ASSEMBLY__
19
20
21
22
/* SPARC machine types.  The model detected at boot is stored in
 * sparc_cpu_model (declared below) and selects machine-specific code
 * paths elsewhere in the kernel.  Do not renumber: the values form part
 * of the boot-time probing contract. */
23 enum sparc_cpu {
24 sun4 = 0x00,
25 sun4c = 0x01,
26 sun4m = 0x02,
27 sun4d = 0x03,
28 sun4e = 0x04,
29 sun4u = 0x05,
30 sun_unknown = 0x06,
31 };
32
/* CPU model detected at boot (see enum sparc_cpu above). */
33 extern enum sparc_cpu sparc_cpu_model;
34
/* Shared fallback pages referenced by EMPTY_PGT/EMPTY_PGE above, plus the
 * all-zero page handed out for anonymous zero mappings. */
35 extern unsigned long empty_bad_page;
36 extern unsigned long empty_bad_page_table;
37 extern unsigned long empty_zero_page;
38
/* PROM (OpenBoot) vector; halt() drops back into the firmware. */
39 extern struct linux_romvec *romvec;
40 #define halt() romvec->pv_halt()
41
42
43
44
45
46
47 extern void flush_user_windows(void);
48 extern void synchronize_user_stack(void);
49 extern void sparc_switch_to(void *new_task);
/* Context switch from `prev` to `next`.  Both variants flush the SPARC
 * register windows out to the user stack, switch the MMU context, carry
 * the active data-segment selector across, and finally transfer control
 * with sparc_switch_to(). */
50 #ifndef __SMP__
/* UP variant: lazy FPU handling — if `next` was not the last FPU user,
 * clear PSR_EF in its saved PSR so its first FP instruction traps and the
 * FP state can be reloaded then. */
51 #define switch_to(prev, next) do { \
52 flush_user_windows(); \
53 switch_to_context(next); \
54 prev->tss.current_ds = active_ds; \
55 active_ds = next->tss.current_ds; \
56 if(last_task_used_math != next) \
57 next->tss.kregs->psr &= ~PSR_EF; \
58 sparc_switch_to(next); \
59 } while(0)
60 #else
61
/* Dump the current FPU registers, FSR and any pending FP queue entries
 * into the given save areas (implemented in assembly elsewhere). */
62 extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
63 void *fpqueue, unsigned long *fpqdepth);
64
/* SMP variant: runs with interrupts masked (cli/sti).  FPU state of the
 * outgoing task is saved eagerly — it may next run on another CPU — and
 * PSR_EF is cleared so a later FP use re-traps.  The lock_depth /
 * syscall_count / kernel_counter lines carry the big-kernel-lock
 * bookkeeping across the switch; NOTE(review): the subtraction reads
 * prev->lock_depth *after* it was just overwritten with syscall_count,
 * which looks intentional (delta relative to this CPU) — confirm against
 * the lock implementation before touching the ordering. */
65 #define switch_to(prev, next) do { \
66 cli(); \
67 if(prev->flags & PF_USEDFPU) { \
68 fpsave(&prev->tss.float_regs[0], &prev->tss.fsr, \
69 &prev->tss.fpqueue[0], &prev->tss.fpqdepth); \
70 prev->flags &= ~PF_USEDFPU; \
71 prev->tss.kregs->psr &= ~PSR_EF; \
72 } \
73 prev->lock_depth = syscall_count; \
74 kernel_counter += (next->lock_depth - prev->lock_depth); \
75 syscall_count = next->lock_depth; \
76 flush_user_windows(); \
77 switch_to_context(next); \
78 prev->tss.current_ds = active_ds; \
79 active_ds = next->tss.current_ds; \
80 sparc_switch_to(next); \
81 sti(); \
82 } while(0)
83 #endif
84
85
/* Set the processor interrupt level (PIL) field of the PSR to __new_ipl.
 * Sequence: read PSR into %g1, clear the old PIL bits (andn with the
 * PSR_PIL mask), shift the new level into bit position 8, mask it to the
 * PIL field, merge, and write the PSR back.  The three nops cover the
 * SPARC wr-to-PSR instruction delay — presumably the standard 3-cycle
 * write-state-register latency.  Clobbers %g1/%g2 (declared). */
86 extern inline void setipl(int __new_ipl)
87 {
88 __asm__ __volatile__("rd %%psr, %%g1\n\t"
89 "andn %%g1, %1, %%g1\n\t"
90 "sll %0, 8, %%g2\n\t"
91 "and %%g2, %1, %%g2\n\t"
92 "or %%g1, %%g2, %%g1\n\t"
93 "wr %%g1, 0x0, %%psr\n\t"
94 "nop; nop; nop\n\t" : :
95 "r" (__new_ipl), "i" (PSR_PIL) :
96 "g1", "g2");
97 }
98
/* Return the current processor interrupt level: read the PSR, mask out
 * everything but the PIL field (PSR_PIL), and shift it down from bit 8. */
99 extern inline int getipl(void)
100 {
101 int retval;
102
103 __asm__ __volatile__("rd %%psr, %0\n\t"
104 "and %0, %1, %0\n\t"
105 "srl %0, 8, %0\n\t" :
106 "=r" (retval) :
107 "i" (PSR_PIL));
108 return retval;
109 }
110
/* Atomically swap the processor interrupt level: install __new_ipl (low
 * 4 bits) into the PSR's PIL field and return the previous level.  The
 * old PIL is extracted first (srl/and into %0), then the field is
 * rebuilt exactly as in setipl() above.  The trailing nops cover the
 * wr-to-PSR delay.  Clobbers %g1/%g2 (declared). */
111 extern inline int swpipl(int __new_ipl)
112 {
113 int retval;
114
115 __asm__ __volatile__("rd %%psr, %%g1\n\t"
116 "srl %%g1, 8, %0\n\t"
117 "and %0, 15, %0\n\t"
118 "andn %%g1, %2, %%g1\n\t"
119 "and %1, 15, %%g2\n\t"
120 "sll %%g2, 8, %%g2\n\t"
121 "or %%g1, %%g2, %%g1\n\t"
122 "wr %%g1, 0x0, %%psr\n\t"
123 "nop; nop; nop\n\t" :
124 "=r" (retval) :
125 "r" (__new_ipl), "i" (PSR_PIL) :
126 "g1", "g2");
127 return retval;
128 }
129
/* Scratch buffer for debugging printouts (defined elsewhere). */
130 extern char spdeb_buf[256];
131
/* Interrupt control in terms of the PIL accessors above: level 15 masks
 * all maskable interrupts (cli), level 0 unmasks them (sti).  Note that
 * save_flags()/restore_flags() save and restore only the IPL, not the
 * whole PSR. */
132 #define cli() setipl(15)
133 #define sti() setipl(0)
134 #define save_flags(flags) do { flags = getipl(); } while (0)
135 #define restore_flags(flags) setipl(flags)
136
/* Emit a single no-op instruction.  No trailing semicolon in the
 * expansion: the caller supplies it, so `if (x) nop(); else ...` parses
 * correctly (the old definition expanded to two statements there). */
#define nop() __asm__ __volatile__ ("nop")
138
/* Exchange *m with val and return the previous contents.  Atomicity with
 * respect to this CPU is obtained by masking interrupts around the
 * read-modify-write (save_flags/cli/restore_flags), not by a hardware
 * swap instruction. */
extern inline unsigned long xchg_u32(volatile unsigned long *m, unsigned long val)
{
	unsigned long saved_ipl;
	unsigned long old;

	save_flags(saved_ipl);
	cli();
	old = *m;
	*m = val;
	restore_flags(saved_ipl);

	return old;
}
149
/* Type-generic exchange: dispatches on sizeof(*ptr) via __xchg() below
 * and casts the result back to the pointed-to type.  tas() ("test and
 * set") is an exchange with 1. */
150 #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
151 #define tas(ptr) (xchg((ptr),1))
152
/* Deliberately never defined: referencing it from __xchg() turns an
 * unsupported operand size into a link-time error. */
153 extern void __xchg_called_with_bad_pointer(void);
154
/* Size dispatcher behind the xchg() macro.  Only 32-bit operands are
 * supported on this port; any other size resolves to a call to the
 * intentionally undefined __xchg_called_with_bad_pointer(), so the build
 * fails at link time instead of silently corrupting memory at run time.
 * (Fixed: stray ';' after the switch brace removed, explicit default
 * added.) */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	default:
		break;
	}
	__xchg_called_with_bad_pointer();
	return x;
}
164
165 #endif
166
167 #endif