/*
 * Acess2 x86_64 Port
 * - Kernel/arch/x86_64/lib.c
 */
#include <acess.h>
#include <arch.h>

// === IMPORTS ===
extern int	GetCPUNum(void);
extern void	*Proc_GetCurThread(void);

// === CODE ===
/**
 * \brief Determine if a short spinlock is locked
 * \param Lock	Lock pointer
 */
int IS_LOCKED(struct sShortSpinlock *Lock)
{
	return !!Lock->Lock;
}

/**
 * \brief Check if the current CPU has the lock
 * \param Lock	Lock pointer
 */
int CPU_HAS_LOCK(struct sShortSpinlock *Lock)
{
	#if STACKED_LOCKS == 1
	return Lock->Lock == GetCPUNum() + 1;
	#elif STACKED_LOCKS == 2
	return Lock->Lock == Proc_GetCurThread();
	#else
	return 0;
	#endif
}

/**
 * \brief Acquire a Short Spinlock
 * \param Lock	Lock pointer
 * 
 * This type of mutex should only be used for very short sections of code,
 * or in places where a Mutex_* would be overkill, such as adding an element
 * to a linked list (usually two assignment lines in C); see the usage sketch
 * after SHORTREL below.
 * 
 * \note This type of lock disables interrupts while held, so ensure that no
 * timing functions are called while it is held. As a matter of fact, spend as
 * little time as possible with this lock held.
 * \note If \a STACKED_LOCKS is set, this type of spinlock can be nested
 */
void SHORTLOCK(struct sShortSpinlock *Lock)
{
	 int	v = 1;
	#if LOCK_DISABLE_INTS
	 int	IF;
	#endif
	#if STACKED_LOCKS == 1
	 int	cpu = GetCPUNum() + 1;
	#elif STACKED_LOCKS == 2
	void	*thread = Proc_GetCurThread();
	#endif
	
	#if LOCK_DISABLE_INTS
	// Save the interrupt flag (interrupts are disabled once the lock is acquired)
	__ASM__ ("pushf;\n\tpop %0" : "=r"(IF));
	IF &= 0x200;	// AND out all but the interrupt flag
	#endif
	
	#if STACKED_LOCKS == 1
	if( Lock->Lock == cpu ) {
		Lock->Depth ++;
		return ;
	}
	#elif STACKED_LOCKS == 2
	if( Lock->Lock == thread ) {
		Lock->Depth ++;
		return ;
	}
	#endif
	
	// Wait for another CPU to release
	while(v) {
		// CMPXCHG:
		//  If r/m32 == EAX, set ZF and set r/m32 = r32
		//  Else, clear ZF and set EAX = r/m32
		#if STACKED_LOCKS == 1
		__ASM__("lock cmpxchgl %2, (%3)"
			: "=a"(v)
			: "a"(0), "r"(cpu), "r"(&Lock->Lock)
			);
		#elif STACKED_LOCKS == 2
		__ASM__("lock cmpxchgq %2, (%3)"
			: "=a"(v)
			: "a"(0), "r"(thread), "r"(&Lock->Lock)
			);
		#else
		__ASM__("xchgl %0, (%2)":"=a"(v):"a"(1),"D"(&Lock->Lock));
		#endif
		
		#if LOCK_DISABLE_INTS
		if( v ) __ASM__("sti");	// Enable interrupts while spinning
		#endif
	}
	
	#if LOCK_DISABLE_INTS
	__ASM__("cli");
	Lock->IF = IF;
	#endif
}
/**
 * \brief Release a short lock
 * \param Lock	Lock pointer
 */
void SHORTREL(struct sShortSpinlock *Lock)
{
	#if STACKED_LOCKS
	if( Lock->Depth ) {
		Lock->Depth --;
		return ;
	}
	#endif
	
	#if LOCK_DISABLE_INTS
	// Lock->IF can change anytime once Lock->Lock is zeroed
	if(Lock->IF) {
		Lock->Lock = 0;
		__ASM__ ("sti");
	}
	else {
		Lock->Lock = 0;
	}
	#else
	Lock->Lock = 0;
	#endif
}

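/*
 * Usage sketch (illustrative only, not part of the kernel build): inserting a
 * node at the head of a singly-linked list under a short spinlock - the sort
 * of two-assignment critical section described above. `tExampleNode`,
 * `gExample_List` and `gExample_Lock` are hypothetical names, not kernel symbols.
 */
#if 0
typedef struct sExampleNode {
	struct sExampleNode	*Next;
	 int	Value;
} tExampleNode;
struct sShortSpinlock	gExample_Lock;
tExampleNode	*gExample_List;

void Example_ListInsert(tExampleNode *Node)
{
	SHORTLOCK( &gExample_Lock );
	// Keep the critical section as short as possible (two assignments)
	Node->Next = gExample_List;
	gExample_List = Node;
	SHORTREL( &gExample_Lock );
}
#endif
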
// === PORT IO ===
/**
 * \brief Write a byte to an IO port
 */
void outb(Uint16 Port, Uint8 Data)
{
	__asm__ __volatile__ ("outb %%al, %%dx"::"d"(Port),"a"(Data));
}
/**
 * \brief Write a word (16 bits) to an IO port
 */
void outw(Uint16 Port, Uint16 Data)
{
	__asm__ __volatile__ ("outw %%ax, %%dx"::"d"(Port),"a"(Data));
}
/**
 * \brief Write a double word (32 bits) to an IO port
 */
void outd(Uint16 Port, Uint32 Data)
{
	__asm__ __volatile__ ("outl %%eax, %%dx"::"d"(Port),"a"(Data));
}
/**
 * \brief Read a byte from an IO port
 */
Uint8 inb(Uint16 Port)
{
	Uint8	ret;
	__asm__ __volatile__ ("inb %%dx, %%al":"=a"(ret):"d"(Port));
	return ret;
}
/**
 * \brief Read a word (16 bits) from an IO port
 */
Uint16 inw(Uint16 Port)
{
	Uint16	ret;
	__asm__ __volatile__ ("inw %%dx, %%ax":"=a"(ret):"d"(Port));
	return ret;
}
/**
 * \brief Read a double word (32 bits) from an IO port
 */
Uint32 ind(Uint16 Port)
{
	Uint32	ret;
	__asm__ __volatile__ ("inl %%dx, %%eax":"=a"(ret):"d"(Port));
	return ret;
}

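/*
 * Usage sketch (illustrative only, not part of the kernel build): reading the
 * RTC seconds register through the standard CMOS index/data ports (0x70/0x71).
 * `Example_ReadRTCSeconds` is a hypothetical helper, not a kernel function.
 */
#if 0
Uint8 Example_ReadRTCSeconds(void)
{
	outb(0x70, 0x00);	// Select CMOS register 0x00 (RTC seconds)
	return inb(0x71);	// Read the selected register
}
#endif
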
// === Endianness ===
Uint32 BigEndian32(Uint32 Value)
{
	Uint32	ret;
	ret = (Value >> 24);
	ret |= ((Value >> 16) & 0xFF) << 8;
	ret |= ((Value >>  8) & 0xFF) << 16;
	ret |= ((Value >>  0) & 0xFF) << 24;
	return ret;
}

Uint16 BigEndian16(Uint16 Value)
{
	return	(Value>>8)|(Value<<8);
}

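/*
 * Illustrative sketch (not part of the kernel build): on little-endian x86_64
 * these helpers simply reverse byte order, e.g. when parsing big-endian on-disk
 * or network fields. `EXAMPLE_EndianCheck` is a hypothetical name.
 */
#if 0
static int EXAMPLE_EndianCheck(void)
{
	if( BigEndian32(0x11223344) != 0x44332211 )	return 0;
	if( BigEndian16(0x1234) != 0x3412 )	return 0;
	return 1;
}
#endif
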
// === Memory Manipulation ===
/**
 * \brief Compare two regions of memory
 * 
 * Falls back to a byte-wise compare when the two pointers have different
 * alignment; otherwise aligns to 8 bytes and compares 64 bits at a time.
 */
int memcmp(const void *__dest, const void *__src, size_t __count)
{
	if( ((tVAddr)__dest & 7) != ((tVAddr)__src & 7) ) {
		const Uint8	*src = __src, *dst = __dest;
		while(__count)
		{
			if( *src != *dst )
				return *dst - *src;
			src ++; dst ++; __count --;
		}
		return 0;
	}
	else {
		const Uint8	*src = __src;
		const Uint8	*dst = __dest;
		const Uint64	*src64, *dst64;
		
		// Align to an 8-byte boundary with byte compares
		while( (tVAddr)src & 7 && __count ) {
			if( *src != *dst )
				return *dst - *src;
			dst ++; src ++; __count --;
		}

		src64 = (void*)src;
		dst64 = (void*)dst;

		while( __count >= 8 )
		{
			if( *src64 != *dst64 )
			{
				// Locate the differing byte within this quadword
				src = (void*)src64;
				dst = (void*)dst64;
				if(src[0] != dst[0])	return dst[0]-src[0];
				if(src[1] != dst[1])	return dst[1]-src[1];
				if(src[2] != dst[2])	return dst[2]-src[2];
				if(src[3] != dst[3])	return dst[3]-src[3];
				if(src[4] != dst[4])	return dst[4]-src[4];
				if(src[5] != dst[5])	return dst[5]-src[5];
				if(src[6] != dst[6])	return dst[6]-src[6];
				if(src[7] != dst[7])	return dst[7]-src[7];
				return -1;	// Unreachable - the quadwords differed, so a byte must differ
			}
			__count -= 8;
			src64 ++;
			dst64 ++;
		}

		// Compare any trailing bytes
		src = (void*)src64;
		dst = (void*)dst64;
		while( __count-- )
		{
			if(*dst != *src)	return *dst - *src;
			dst ++;
			src ++;
		}
	}
	return 0;
}

/**
 * \brief Copy a block of memory
 * 
 * Uses `rep movsq` when source and destination share alignment,
 * otherwise falls back to `rep movsb`.
 */
void *memcpy(void *__dest, const void *__src, size_t __count)
{
	if( ((tVAddr)__dest & 7) != ((tVAddr)__src & 7) )
		__asm__ __volatile__ ("rep movsb" : : "D"(__dest),"S"(__src),"c"(__count));
	else {
		const Uint8	*src = __src;
		Uint8	*dst = __dest;
		// Align to an 8-byte boundary
		while( (tVAddr)src & 7 && __count ) {
			*dst++ = *src++;
			__count --;
		}

		// Copy whole quadwords, then any trailing bytes
		__asm__ __volatile__ ("rep movsq" : : "D"(dst),"S"(src),"c"(__count/8));
		src += __count & ~7;
		dst += __count & ~7;
		__count = __count & 7;
		while( __count-- )
			*dst++ = *src++;
	}
	return __dest;
}

/**
 * \brief Fill a block of memory with a byte value
 * 
 * Zero fills of aligned blocks use `rep stosq`; everything else uses `rep stosb`.
 */
void *memset(void *__dest, int __val, size_t __count)
{
	if( __val != 0 || ((tVAddr)__dest & 7) != 0 )
		__asm__ __volatile__ ("rep stosb" : : "D"(__dest),"a"(__val),"c"(__count));
	else {
		Uint8	*dst = __dest;

		// Zero whole quadwords, then any trailing bytes
		__asm__ __volatile__ ("rep stosq" : : "D"(dst),"a"(0),"c"(__count/8));
		dst += __count & ~7;
		__count = __count & 7;
		while( __count-- )
			*dst++ = 0;
	}
	return __dest;
}

/**
 * \brief Fill memory with a 32-bit value
 * \note \a __count is in 32-bit words, not bytes
 */
void *memsetd(void *__dest, Uint32 __val, size_t __count)
{
	__asm__ __volatile__ ("rep stosl" : : "D"(__dest),"a"(__val),"c"(__count));
	return __dest;
}

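/*
 * Usage sketch (illustrative only, not part of the kernel build): because
 * memsetd's count is in 32-bit words, it suits operations like filling a
 * 32bpp framebuffer scanline. `Example_FillScanline` is a hypothetical helper.
 */
#if 0
void Example_FillScanline(Uint32 *Dest, Uint32 Colour, size_t Pixels)
{
	memsetd(Dest, Colour, Pixels);	// Count is in dwords, not bytes
}
#endif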
