Cleaning up and Debugging (Exposed by AcessNative mostly)
[tpg/acess2.git] / Kernel / arch / x86_64 / lib.c
/*
 * Acess2 x86_64 Architecture
 * - Short spinlocks, debug/serial output, port IO and memory routines
 */
3 #include <acess.h>
4 #include <arch.h>
5
6 #define DEBUG_TO_E9     1
7 #define DEBUG_TO_SERIAL 1
8 #define SERIAL_PORT     0x3F8
9 #define GDB_SERIAL_PORT 0x2F8
10
11 // === IMPORTS ===
12 extern int      GetCPUNum(void);
13 extern void     *Proc_GetCurThread(void);
14
// === GLOBALS ===
 int	gbDebug_SerialSetup = 0;	// Non-zero once the kernel debug UART (SERIAL_PORT) has been initialised
 int	gbGDB_SerialSetup = 0;	// Non-zero once the GDB stub UART (GDB_SERIAL_PORT) has been initialised
18
19 // === CODE ===
20 /**
21  * \brief Determine if a short spinlock is locked
22  * \param Lock  Lock pointer
23  */
24 int IS_LOCKED(struct sShortSpinlock *Lock)
25 {
26         return !!Lock->Lock;
27 }
28
/**
 * \brief Check if the current CPU (or thread) owns the lock
 * \param Lock	Lock pointer
 * \return Non-zero if the caller holds \a Lock
 * \note Ownership is only tracked when \a STACKED_LOCKS is enabled;
 *       otherwise this always returns 0.
 */
int CPU_HAS_LOCK(struct sShortSpinlock *Lock)
{
	#if STACKED_LOCKS == 1
	// Lock value is CPU number + 1 (so CPU 0 differs from "unlocked" == 0)
	return Lock->Lock == GetCPUNum() + 1;
	#elif STACKED_LOCKS == 2
	// Lock value is the owning thread pointer
	return Lock->Lock == Proc_GetCurThread();
	#else
	// Plain spinlock mode: owner is not recorded
	return 0;
	#endif
}
43
/**
 * \brief Acquire a Short Spinlock
 * \param Lock	Lock pointer
 * 
 * This type of mutex should only be used for very short sections of code,
 * or in places where a Mutex_* would be overkill, such as appending
 * an element to linked list (usually two assignement lines in C)
 * 
 * \note This type of lock halts interrupts, so ensure that no timing
 * functions are called while it is held. As a matter of fact, spend as
 * little time as possible with this lock held
 * \note If \a STACKED_LOCKS is set, this type of spinlock can be nested
 */
void SHORTLOCK(struct sShortSpinlock *Lock)
{
	 int	v = 1;	// Atomic-exchange result; non-zero while someone else holds the lock
	#if LOCK_DISABLE_INTS
	 int	IF;	// Caller's interrupt-enable flag, stashed in the lock for SHORTREL
	#endif
	#if STACKED_LOCKS == 1
	 int	cpu = GetCPUNum() + 1;	// +1 so CPU 0 is distinguishable from "unlocked" (0)
	#elif STACKED_LOCKS == 2
	void	*thread = Proc_GetCurThread();	// Lock value is the owning thread pointer
	#endif
	
	#if LOCK_DISABLE_INTS
	// Save interrupt state and clear interrupts
	__ASM__ ("pushf;\n\tpop %0" : "=r"(IF));
	IF &= 0x200;	// AND out all but the interrupt flag
	#endif
	
	#if STACKED_LOCKS == 1
	// Re-entry by the owning CPU just increments the nesting depth
	if( Lock->Lock == cpu ) {
		Lock->Depth ++;
		return ;
	}
	#elif STACKED_LOCKS == 2
	// Re-entry by the owning thread just increments the nesting depth
	if( Lock->Lock == thread ) {
		Lock->Depth ++;
		return ;
	}
	#endif
	
	// Wait for another CPU to release
	while(v) {
		// CMPXCHG:
		//  If r/m32 == EAX, set ZF and set r/m32 = r32
		//  Else, clear ZF and set EAX = r/m32
		#if STACKED_LOCKS == 1
		__ASM__("lock cmpxchgl %2, (%3)"
			: "=a"(v)
			: "a"(0), "r"(cpu), "r"(&Lock->Lock)
			);
		#elif STACKED_LOCKS == 2
		__ASM__("lock cmpxchgq %2, (%3)"
			: "=a"(v)
			: "a"(0), "r"(thread), "r"(&Lock->Lock)
			);
		#else
		// Plain test-and-set: v receives the previous lock value
		__ASM__("xchgl %0, (%2)":"=a"(v):"a"(1),"D"(&Lock->Lock));
		#endif
		
		#if LOCK_DISABLE_INTS
		// NOTE(review): interrupts stay enabled between this "sti" and the
		// "cli" below, so there is a short window after acquisition where
		// an IRQ can fire while the lock is held - confirm this is intended
		if( v ) __ASM__("sti");	// Re-enable interrupts
		#endif
	}
	
	#if LOCK_DISABLE_INTS
	// Lock acquired: mask interrupts and record the caller's prior IF state
	__ASM__("cli");
	Lock->IF = IF;
	#endif
}
/**
 * \brief Release a short lock
 * \param Lock	Lock pointer
 * 
 * If \a STACKED_LOCKS is enabled and the lock is nested, this only
 * unwinds one nesting level; the lock itself stays held.
 */
void SHORTREL(struct sShortSpinlock *Lock)
{
	#if STACKED_LOCKS
	// Nested acquisition: pop one level, keep the lock held
	if( Lock->Depth ) {
		Lock->Depth --;
		return ;
	}
	#endif
	
	#if LOCK_DISABLE_INTS
	// Lock->IF can change anytime once Lock->Lock is zeroed
	// (another CPU may immediately take the lock and overwrite it),
	// so the saved IF must be tested BEFORE the lock word is cleared
	if(Lock->IF) {
		Lock->Lock = 0;
		__ASM__ ("sti");
	}
	else {
		Lock->Lock = 0;
	}
	#else
	Lock->Lock = 0;
	#endif
}
142
143 // === DEBUG IO ===
144 int putDebugChar(char ch)
145 {
146         if(!gbGDB_SerialSetup) {
147                 outb(GDB_SERIAL_PORT + 1, 0x00);    // Disable all interrupts
148                 outb(GDB_SERIAL_PORT + 3, 0x80);    // Enable DLAB (set baud rate divisor)
149                 outb(GDB_SERIAL_PORT + 0, 0x0C);    // Set divisor to 12 (lo byte) 9600 baud
150                 outb(GDB_SERIAL_PORT + 1, 0x00);    //  (base is         (hi byte)
151                 outb(GDB_SERIAL_PORT + 3, 0x03);    // 8 bits, no parity, one stop bit (8N1)
152                 outb(GDB_SERIAL_PORT + 2, 0xC7);    // Enable FIFO with 14-byte threshold and clear it
153                 outb(GDB_SERIAL_PORT + 4, 0x0B);    // IRQs enabled, RTS/DSR set
154                 gbDebug_SerialSetup = 1;
155         }
156         while( (inb(GDB_SERIAL_PORT + 5) & 0x20) == 0 );
157         outb(GDB_SERIAL_PORT, ch);
158         return 0;
159 }
160 int getDebugChar(void)
161 {
162         if(!gbGDB_SerialSetup) {
163                 outb(GDB_SERIAL_PORT + 1, 0x00);    // Disable all interrupts
164                 outb(GDB_SERIAL_PORT + 3, 0x80);    // Enable DLAB (set baud rate divisor)
165                 outb(GDB_SERIAL_PORT + 0, 0x0C);    // Set divisor to 12 (lo byte) 9600 baud
166                 outb(GDB_SERIAL_PORT + 1, 0x00);    //                   (hi byte)
167                 outb(GDB_SERIAL_PORT + 3, 0x03);    // 8 bits, no parity, one stop bit
168                 outb(GDB_SERIAL_PORT + 2, 0xC7);    // Enable FIFO with 14-byte threshold and clear it
169                 outb(GDB_SERIAL_PORT + 4, 0x0B);    // IRQs enabled, RTS/DSR set
170                 gbDebug_SerialSetup = 1;
171         }
172         while( (inb(GDB_SERIAL_PORT + 5) & 1) == 0)     ;
173         return inb(GDB_SERIAL_PORT);
174 }
175
/**
 * \brief Emit a character on the kernel debug channels
 * \param ch	Character to emit
 * 
 * Writes to the Bochs/QEMU 0xE9 debug console and/or the debug UART,
 * depending on DEBUG_TO_E9 / DEBUG_TO_SERIAL.
 */
void Debug_PutCharDebug(char ch)
{
	#if DEBUG_TO_E9
	// Emulator debug console (port 0xE9 hack)
	__asm__ __volatile__ ( "outb %%al, $0xe9" :: "a"(((Uint8)ch)) );
	#endif
	
	#if DEBUG_TO_SERIAL
	if( !gbDebug_SerialSetup )
	{
		// One-time UART bring-up: 9600 baud, 8N1, FIFOs on
		outb(SERIAL_PORT + 1, 0x00);	// All UART interrupts off
		outb(SERIAL_PORT + 3, 0x80);	// DLAB on to program the divisor
		outb(SERIAL_PORT + 0, 0x0C);	// Divisor low byte (12 => 9600 baud)
		outb(SERIAL_PORT + 1, 0x00);	// Divisor high byte
		outb(SERIAL_PORT + 3, 0x03);	// 8 data bits, no parity, 1 stop; DLAB off
		outb(SERIAL_PORT + 2, 0xC7);	// FIFO enable + clear, 14-byte trigger
		outb(SERIAL_PORT + 4, 0x0B);	// IRQs enabled, RTS/DSR set
		gbDebug_SerialSetup = 1;
	}
	// Spin until the transmitter can take another byte (LSR bit 5)
	while( !(inb(SERIAL_PORT + 5) & 0x20) )
		;
	outb(SERIAL_PORT, ch);
	#endif
}
197
198 // === PORT IO ===
/**
 * \brief Write a byte to an x86 IO port
 * \param Port	Port number
 * \param Data	Byte to write
 */
void outb(Uint16 Port, Uint8 Data)
{
	__asm__ __volatile__ ("outb %%al, %%dx"::"d"(Port),"a"(Data));
}
/**
 * \brief Write a 16-bit word to an x86 IO port
 * \param Port	Port number
 * \param Data	Word to write
 */
void outw(Uint16 Port, Uint16 Data)
{
	__asm__ __volatile__ ("outw %%ax, %%dx"::"d"(Port),"a"(Data));
}
/**
 * \brief Write a 32-bit dword to an x86 IO port
 * \param Port	Port number
 * \param Data	Dword to write
 */
void outd(Uint16 Port, Uint32 Data)
{
	__asm__ __volatile__ ("outl %%eax, %%dx"::"d"(Port),"a"(Data));
}
/**
 * \brief Read a byte from an x86 IO port
 * \param Port	Port number
 * \return Byte read
 */
Uint8 inb(Uint16 Port)
{
	Uint8	ret;
	__asm__ __volatile__ ("inb %%dx, %%al":"=a"(ret):"d"(Port));
	return ret;
}
/**
 * \brief Read a 16-bit word from an x86 IO port
 * \param Port	Port number
 * \return Word read
 */
Uint16 inw(Uint16 Port)
{
	Uint16	ret;
	__asm__ __volatile__ ("inw %%dx, %%ax":"=a"(ret):"d"(Port));
	return ret;
}
/**
 * \brief Read a 32-bit dword from an x86 IO port
 * \param Port	Port number
 * \return Dword read
 */
Uint32 ind(Uint16 Port)
{
	Uint32	ret;
	__asm__ __volatile__ ("inl %%dx, %%eax":"=a"(ret):"d"(Port));
	return ret;
}
229
230 // === Endianness ===
231 Uint32 BigEndian32(Uint32 Value)
232 {
233         Uint32  ret;
234         ret = (Value >> 24);
235         ret |= ((Value >> 16) & 0xFF) << 8;
236         ret |= ((Value >>  8) & 0xFF) << 16;
237         ret |= ((Value >>  0) & 0xFF) << 24;
238         return ret;
239 }
240
241 Uint16 BigEndian16(Uint16 Value)
242 {
243         return  (Value>>8)|(Value<<8);
244 }
245
246 // === Memory Manipulation ===
/**
 * \brief Compare two blocks of memory
 * \param __dest	First block
 * \param __src	Second block
 * \param __count	Number of bytes to compare
 * \return 0 if equal; otherwise first-differing byte of \a __dest minus
 *         the corresponding byte of \a __src
 */
int memcmp(const void *__dest, const void *__src, size_t __count)
{
	// If the pointers can never be simultaneously 8-byte aligned,
	// fall back to a plain byte-by-byte compare
	if( ((tVAddr)__dest & 7) != ((tVAddr)__src & 7) ) {
		const Uint8	*src = __src, *dst = __dest;
		while(__count)
		{
			if( *src != *dst )
				return *dst - *src;
			src ++; dst ++; __count --;
		}
		return 0;
	}
	else {
		const Uint8	*src = __src;
		const Uint8	*dst = __dest;
		const Uint64	*src64, *dst64;
		
		// Byte-compare up to the first 8-byte boundary
		while( (tVAddr)src & 7 && __count ) {
			if( *src != *dst )
				return *dst - *src;
			dst ++; src ++; __count --;
		}

		src64 = (void*)src;
		dst64 = (void*)dst;

		// Main loop: compare 8 bytes at a time; on a mismatch, re-scan
		// that qword byte-wise so the result is endian-independent
		while( __count >= 8 )
		{
			if( *src64 != *dst64 )
			{
				src = (void*)src64;
				dst = (void*)dst64;
				if(src[0] != dst[0])	return dst[0]-src[0];
				if(src[1] != dst[1])	return dst[1]-src[1];
				if(src[2] != dst[2])	return dst[2]-src[2];
				if(src[3] != dst[3])	return dst[3]-src[3];
				if(src[4] != dst[4])	return dst[4]-src[4];
				if(src[5] != dst[5])	return dst[5]-src[5];
				if(src[6] != dst[6])	return dst[6]-src[6];
				if(src[7] != dst[7])	return dst[7]-src[7];
				return -1;	// This should never happen
			}
			__count -= 8;
			src64 ++;
			dst64 ++;
		}

		// Trailing bytes (fewer than 8 remain)
		src = (void*)src64;
		dst = (void*)dst64;
		while( __count-- )
		{
			if(*dst != *src)	return *dst - *src;
			dst ++;
			src ++;
		}
	}
	return 0;
}
305
306 void *memcpy(void *__dest, const void *__src, size_t __count)
307 {
308         if( ((tVAddr)__dest & 7) != ((tVAddr)__src & 7) )
309                 __asm__ __volatile__ ("rep movsb" : : "D"(__dest),"S"(__src),"c"(__count));
310         else {
311                 const Uint8     *src = __src;
312                 Uint8   *dst = __dest;
313                 while( (tVAddr)src & 7 && __count ) {
314                         *dst++ = *src++;
315                         __count --;
316                 }
317
318                 __asm__ __volatile__ ("rep movsq" : : "D"(dst),"S"(src),"c"(__count/8));
319                 src += __count & ~7;
320                 dst += __count & ~7;
321                 __count = __count & 7;
322                 while( __count-- )
323                         *dst++ = *src++;
324         }
325         return __dest;
326 }
327
328 void *memset(void *__dest, int __val, size_t __count)
329 {
330         if( __val != 0 || ((tVAddr)__dest & 7) != 0 )
331                 __asm__ __volatile__ ("rep stosb" : : "D"(__dest),"a"(__val),"c"(__count));
332         else {
333                 Uint8   *dst = __dest;
334
335                 __asm__ __volatile__ ("rep stosq" : : "D"(dst),"a"(0),"c"(__count/8));
336                 dst += __count & ~7;
337                 __count = __count & 7;
338                 while( __count-- )
339                         *dst++ = 0;
340         }
341         return __dest;
342 }
343
344 void *memsetd(void *__dest, Uint32 __val, size_t __count)
345 {
346         __asm__ __volatile__ ("rep stosl" : : "D"(__dest),"a"(__val),"c"(__count));
347         return __dest;
348 }
349

UCC git Repository :: git.ucc.asn.au