Kernel/x86 - Fixing build
diff --git a/Kernel/arch/x86/lib.c b/Kernel/arch/x86/lib.c
index a3f98be..3858eff 100644
--- a/Kernel/arch/x86/lib.c
+++ b/Kernel/arch/x86/lib.c
@@ -1,6 +1,8 @@
 /*
- * AcessOS Microkernel Version
- * lib.c
+ * Acess2
+ *
+ * arch/x86/lib.c
+ * - General arch-specific stuff
  */
 #include <acess.h>
 #include <threads_int.h>
@@ -15,6 +17,7 @@
 // === IMPORTS ===
 #if TRACE_LOCKS
 extern struct sShortSpinlock   glDebug_Lock;
+extern struct sShortSpinlock   glThreadListLock;
 #endif
 extern int     GetCPUNum(void);
 
@@ -42,13 +45,7 @@ int IS_LOCKED(struct sShortSpinlock *Lock)
  */
 int CPU_HAS_LOCK(struct sShortSpinlock *Lock)
 {
-       #if STACKED_LOCKS == 1
        return Lock->Lock == GetCPUNum() + 1;
-       #elif STACKED_LOCKS == 2
-       return Lock->Lock == Proc_GetCurThread();
-       #else
-       return 0;
-       #endif
 }
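
With the STACKED_LOCKS variants removed, the lock word has a single encoding: zero means free, and the owning CPU's number plus one means held (the +1 keeps CPU 0 distinct from the unlocked state). For reference, a sketch of the fields this code relies on (the real definition lives in the Acess2 headers; treat this as an assumption):

    struct sShortSpinlock {
        volatile int    Lock;   // 0 = free, GetCPUNum()+1 = holding CPU
         int    IF;             // saved EFLAGS.IF (0x200 bit) of the holder
    };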
 
 /**
@@ -67,67 +64,39 @@ int CPU_HAS_LOCK(struct sShortSpinlock *Lock)
 void SHORTLOCK(struct sShortSpinlock *Lock)
 {
         int    v = 1;
-       #if LOCK_DISABLE_INTS
         int    IF;
-       #endif
-       #if STACKED_LOCKS == 1
         int    cpu = GetCPUNum() + 1;
-       #elif STACKED_LOCKS == 2
-       void    *thread = Proc_GetCurThread();
-       #endif
        
-       #if LOCK_DISABLE_INTS
        // Save interrupt state
        __ASM__ ("pushf;\n\tpop %0" : "=r"(IF));
        IF &= 0x200;    // AND out all but the interrupt flag
-       #endif
        
-       #if STACKED_LOCKS == 1
-       if( Lock->Lock == cpu ) {
-               Lock->Depth ++;
-               return ;
-       }
-       #elif STACKED_LOCKS == 2
-       if( Lock->Lock == thread ) {
-               Lock->Depth ++;
-               return ;
+       #if TRACE_LOCKS
+       if( Lock != &glDebug_Lock && Lock != &glThreadListLock )
+       {
+               //Log_Log("LOCK", "%p locked by %p", Lock, __builtin_return_address(0));
+               Debug("%p obtaining %p (Called by %p)", __builtin_return_address(0), Lock, __builtin_return_address(1));
        }
        #endif
        
+       __ASM__("cli");
+       
        // Wait for another CPU to release
-       while(v) {
-               // CMPXCHG:
-               //  If r/m32 == EAX, set ZF and set r/m32 = r32
-               //  Else, clear ZF and set EAX = r/m32
-               #if STACKED_LOCKS == 1
-               __ASM__("lock cmpxchgl %2, (%3)"
-                       : "=a"(v)
-                       : "a"(0), "r"(cpu), "r"(&Lock->Lock)
-                       );
-               #elif STACKED_LOCKS == 2
-               __ASM__("lock cmpxchgl %2, (%3)"
-                       : "=a"(v)
-                       : "a"(0), "r"(thread), "r"(&Lock->Lock)
-                       );
-               #else
-               __ASM__("xchgl %%eax, (%%edi)":"=a"(v):"a"(1),"D"(&Lock->Lock));
-               #endif
-               
-               #if LOCK_DISABLE_INTS
-               if( v ) __ASM__("sti"); // Re-enable interrupts
-               #endif
-       }
+       __ASM__(
+               "1: xor %%eax, %%eax\n\t"       // re-zero EAX: a failed cmpxchg loads the holder's tag into it
+               "lock cmpxchgl %2, (%3)\n\t"    // 0 -> cpu swap, succeeds only if the lock was free
+               "jnz 1b"
+               : "=a"(v)
+               : "a"(0), "r"(cpu), "r"(&Lock->Lock)
+               );
        
-       #if LOCK_DISABLE_INTS
-       __ASM__("cli");
        Lock->IF = IF;
-       #endif
        
        #if TRACE_LOCKS
-       if( Lock != &glDebug_Lock )
+       if( Lock != &glDebug_Lock && Lock != &glThreadListLock )
        {
                //Log_Log("LOCK", "%p locked by %p", Lock, __builtin_return_address(0));
-               LogF("Lock %p locked by %p\n", Lock, __builtin_return_address(0));
+               //Debug("Lock %p locked by %p\t%p", Lock, __builtin_return_address(0), __builtin_return_address(1));
+               Debug("got it");
        }
        #endif
 }
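
The rewritten acquire path folds the spin into the assembly: `lock cmpxchgl` swaps the CPU tag into the lock word when the word equals EAX, and `jnz 1b` retries until that succeeds (EAX has to be re-zeroed on each pass, since a failed cmpxchg loads the current holder's tag into it). A C-level sketch of the same logic, substituting GCC's __sync builtin for the raw cmpxchg (an illustration, not the kernel's code):

    void SHORTLOCK_sketch(struct sShortSpinlock *Lock)
    {
         int    cpu = GetCPUNum() + 1;
         int    IF;

        // Save EFLAGS.IF, then disable interrupts for the critical section
        __ASM__ ("pushf;\n\tpop %0" : "=r"(IF));
        IF &= 0x200;
        __ASM__ ("cli");

        // Spin until the 0 -> cpu swap succeeds
        while( !__sync_bool_compare_and_swap(&Lock->Lock, 0, cpu) )
                ;

        Lock->IF = IF;  // only safe to write once the lock is held
    }

Note one behavioural change from the removed code: interrupts are now disabled before the spin and stay off while waiting, instead of being re-enabled between attempts.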
@@ -137,22 +106,14 @@ void SHORTLOCK(struct sShortSpinlock *Lock)
  */
 void SHORTREL(struct sShortSpinlock *Lock)
 {      
-       #if STACKED_LOCKS
-       if( Lock->Depth ) {
-               Lock->Depth --;
-               return ;
-       }
-       #endif
-       
        #if TRACE_LOCKS
-       if( Lock != &glDebug_Lock )
+       if( Lock != &glDebug_Lock && Lock != &glThreadListLock )
        {
                //Log_Log("LOCK", "%p released by %p", Lock, __builtin_return_address(0));
-               LogF("Lock %p released by %p\n", Lock, __builtin_return_address(0));
+               Debug("Lock %p released by %p\t%p", Lock, __builtin_return_address(0), __builtin_return_address(1));
        }
        #endif
        
-       #if LOCK_DISABLE_INTS
        // Lock->IF can change anytime once Lock->Lock is zeroed
        if(Lock->IF) {
                Lock->Lock = 0;
@@ -161,9 +122,6 @@ void SHORTREL(struct sShortSpinlock *Lock)
        else {
                Lock->Lock = 0;
        }
-       #else
-       Lock->Lock = 0;
-       #endif
 }
 
 // === DEBUG IO ===
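
Taken together, the pair now forms a plain non-recursive spinlock with interrupt save/restore. A minimal usage sketch (glExampleLock is hypothetical):

    struct sShortSpinlock   glExampleLock;  // hypothetical, for illustration

    void Example(void)
    {
        SHORTLOCK(&glExampleLock);
        // critical section: interrupts off, other CPUs spin
        SHORTREL(&glExampleLock);   // restores EFLAGS.IF if it was set
    }

Since the Depth handling is gone, re-taking a lock already held by the current CPU now deadlocks rather than nesting, so callers that relied on STACKED_LOCKS must not re-enter.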
@@ -312,9 +270,47 @@ int memcmp(const void *m1, const void *m2, size_t Num)
  */
 void *memcpy(void *Dest, const void *Src, size_t Num)
 {
-       if( ((Uint)Dest & 3) || ((Uint)Src & 3) )
-               __asm__ __volatile__ ("rep movsb" :: "D" (Dest), "S" (Src), "c" (Num));
-       else {
+       tVAddr  dst = (tVAddr)Dest;
+       tVAddr  src = (tVAddr)Src;
+       if( (dst & 3) != (src & 3) )
+       {
+               __asm__ __volatile__ ("rep movsb" :: "D" (dst), "S" (src), "c" (Num));
+//             Debug("\nmemcpy:Num=0x%x by %p (UA)", Num, __builtin_return_address(0));
+       }
+       #if 1
+       else if( Num > 128 && (dst & 15) == (src & 15) )
+       {
+               char    tmp[16+15];     // Note, this is a hack to save/restore xmm0
+                int    count = 16 - (dst & 15);
+//             Debug("\nmemcpy:Num=0x%x by %p (SSE)", Num, __builtin_return_address(0));
+               if( count < 16 )
+               {
+                       Num -= count;
+                       __asm__ __volatile__ ("rep movsb" : "=D"(dst),"=S"(src): "0"(dst), "1"(src), "c"(count));
+               }
+               
+               count = Num / 16;
+               __asm__ __volatile__ (
+                       "movdqa %%xmm0, 0(%5);\n\t"     // save caller's xmm0 to the aligned buffer
+                       "1:\n\t"
+                       "movdqa 0(%1), %%xmm0;\n\t"
+                       "movdqa %%xmm0, 0(%0);\n\t"
+                       "add $16,%0;\n\t"
+                       "add $16,%1;\n\t"
+                       "loop 1b;\n\t"
+                       "movdqa 0(%5), %%xmm0;\n\t"     // restore caller's xmm0
+                       : "=r"(dst),"=r"(src)
+                       : "0"(dst), "1"(src), "c"(count), "r" (((tVAddr)tmp+15)&~15)
+                       );
+
+               count = Num & 15;
+               if(count)
+                       __asm__ __volatile__ ("rep movsb" :: "D"(dst), "S"(src), "c"(count));
+       }
+       #endif
+       else
+       {
+//             Debug("\nmemcpy:Num=0x%x by %p", Num, __builtin_return_address(0));
                __asm__ __volatile__ (
                        "rep movsl;\n\t"
                        "mov %3, %%ecx;\n\t"
@@ -323,6 +319,7 @@ void *memcpy(void *Dest, const void *Src, size_t Num)
        }
        return Dest;
 }
+
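
The new SSE path only fires when both pointers share the same alignment modulo 16 and the copy is long enough to amortise the setup. It copies a byte-wise head up to the first 16-byte boundary, streams the body 16 bytes at a time with movdqa, then finishes the tail with rep movsb; xmm0 is spilled to the stack buffer first and reloaded afterwards, presumably because the kernel must not clobber the interrupted context's SSE state. The same three-phase structure in plain C (a sketch only; Uint8 and tVAddr come from acess.h):

    void *memcpy_sketch(void *Dest, const void *Src, size_t Num)
    {
        Uint8   *d = Dest;
        const Uint8     *s = Src;

        // Head: byte copies until the destination is 16-byte aligned
        // (the source reaches alignment at the same time, by the entry test)
        while( Num && ((tVAddr)d & 15) ) {
                *d++ = *s++;
                Num --;
        }

        // Body: 16 bytes per iteration (movdqa loads/stores in the real code)
        while( Num >= 16 ) {
                 int    i;
                for( i = 0; i < 16; i ++ )
                        d[i] = s[i];
                d += 16;
                s += 16;
                Num -= 16;
        }

        // Tail: remaining bytes (fewer than 16)
        while( Num -- )
                *d++ = *s++;
        return Dest;
    }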
 /**
  * \fn void *memcpyd(void *Dest, const void *Src, size_t Num)
  * \brief Copy \a Num DWORDs from \a Src to \a Dest
@@ -333,6 +330,25 @@ void *memcpyd(void *Dest, const void *Src, size_t Num)
        return Dest;
 }
 
+Uint64 DivMod64U(Uint64 Num, Uint64 Div, Uint64 *Rem)
+{
+       Uint64  ret;
+       if( Div < 0x100000000ULL && Num < 0xFFFFFFFF * Div ) {
+               Uint32  rem, ret_32;
+               __asm__ __volatile__(
+                       "div %4"
+                       : "=a" (ret_32), "=d" (rem)
+                       : "a" ( (Uint32)(Num & 0xFFFFFFFF) ), "d" ((Uint32)(Num >> 32)), "r" (Div)
+                       );
+               if(Rem) *Rem = rem;
+               return ret_32;
+       }
+
+       ret = __udivdi3(Num, Div);
+       if(Rem) *Rem = __umoddi3(Num, Div);
+       return ret;
+}
+
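
DivMod64U gives the rest of the kernel a combined 64-bit divide and remainder. On 32-bit x86 there is no native 64/64 divide, so when the divisor fits in 32 bits and the quotient cannot overflow (the Num < 0xFFFFFFFF * Div test), a single div with Num in EDX:EAX yields both quotient and remainder at once; everything else falls through to __udivdi3/__umoddi3 below. A usage sketch (values are illustrative only):

    Uint64  rem;
    Uint64  quot = DivMod64U(1000000007ULL, 10, &rem);
    // quot == 100000000, rem == 7 -- one div instruction on the fast path

    quot = DivMod64U(1ULL << 40, 3, NULL);  // Rem may be NULL when unused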
 /**
  * \fn Uint64 __udivdi3(Uint64 Num, Uint64 Den)
  * \brief Divide two 64-bit integers
