Kernel/x86 - Fixing build
[tpg/acess2.git] / Kernel/arch/x86/lib.c
index 0b802d1..3858eff 100644
@@ -45,13 +45,7 @@ int IS_LOCKED(struct sShortSpinlock *Lock)
  */
 int CPU_HAS_LOCK(struct sShortSpinlock *Lock)
 {
-       #if STACKED_LOCKS == 1
        return Lock->Lock == GetCPUNum() + 1;
-       #elif STACKED_LOCKS == 2
-       return Lock->Lock == Proc_GetCurThread();
-       #else
-       return 0;
-       #endif
 }
 
 /**
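With the STACKED_LOCKS and LOCK_DISABLE_INTS configuration paths removed, ownership is always encoded as GetCPUNum() + 1 and the Depth counter is no longer needed. The header is not part of this diff, but the structure presumably reduces to something like the following sketch (field names taken from the code in this file; treat it as an assumption, not the actual header):

	struct sShortSpinlock {
		volatile int	Lock;	// 0 = free, otherwise owner's GetCPUNum() + 1
		 int	IF;	// Saved interrupt flag (EFLAGS bit 9) of the holder
	};
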
@@ -70,32 +64,12 @@ int CPU_HAS_LOCK(struct sShortSpinlock *Lock)
 void SHORTLOCK(struct sShortSpinlock *Lock)
 {
         int    v = 1;
-       #if LOCK_DISABLE_INTS
         int    IF;
-       #endif
-       #if STACKED_LOCKS == 1
         int    cpu = GetCPUNum() + 1;
-       #elif STACKED_LOCKS == 2
-       void    *thread = Proc_GetCurThread();
-       #endif
        
-       #if LOCK_DISABLE_INTS
        // Save interrupt state
        __ASM__ ("pushf;\n\tpop %0" : "=r"(IF));
        IF &= 0x200;    // AND out all but the interrupt flag
-       #endif
-       
-       #if STACKED_LOCKS == 1
-       if( Lock->Lock == cpu ) {
-               Lock->Depth ++;
-               return ;
-       }
-       #elif STACKED_LOCKS == 2
-       if( Lock->Lock == thread ) {
-               Lock->Depth ++;
-               return ;
-       }
-       #endif
        
        #if TRACE_LOCKS
        if( Lock != &glDebug_Lock && Lock != &glThreadListLock )
@@ -105,34 +79,18 @@ void SHORTLOCK(struct sShortSpinlock *Lock)
        }
        #endif
        
+       __ASM__("cli");  // Interrupts stay disabled until SHORTREL restores them
+       
        // Wait for another CPU to release
-       while(v) {
-               // CMPXCHG:
-               //  If r/m32 == EAX, set ZF and set r/m32 = r32
-               //  Else, clear ZF and set EAX = r/m32
-               #if STACKED_LOCKS == 1
-               __ASM__("lock cmpxchgl %2, (%3)"
-                       : "=a"(v)
-                       : "a"(0), "r"(cpu), "r"(&Lock->Lock)
-                       );
-               #elif STACKED_LOCKS == 2
-               __ASM__("lock cmpxchgl %2, (%3)"
-                       : "=a"(v)
-                       : "a"(0), "r"(thread), "r"(&Lock->Lock)
-                       );
-               #else
-               __ASM__("xchgl %%eax, (%%edi)":"=a"(v):"a"(1),"D"(&Lock->Lock));
-               #endif
-               
-               #if LOCK_DISABLE_INTS
-               if( v ) __ASM__("sti"); // Re-enable interrupts
-               #endif
-       }
+       __ASM__(
+               "1: xorl %%eax, %%eax\n\t"      // CMPXCHG loads EAX with the lock value on failure; re-zero it before retrying
+               "lock cmpxchgl %2, (%3)\n\t"
+               "jnz 1b"
+               : "=a"(v)
+               : "a"(0), "r"(cpu), "r"(&Lock->Lock)
+               );
        
-       #if LOCK_DISABLE_INTS
-       __ASM__("cli");
        Lock->IF = IF;
-       #endif
        
        #if TRACE_LOCKS
        if( Lock != &glDebug_Lock && Lock != &glThreadListLock )
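Note the subtlety the re-zeroing handles: on a failed compare, CMPXCHG clears ZF and loads EAX with the current value of Lock->Lock, so looping straight back to the compare would test against the owner's value and could "acquire" a lock that is still held. In portable terms the loop is equivalent to this sketch built on a GCC atomic builtin (an illustration, not what the kernel uses here):

	// Spin until Lock->Lock atomically changes from 0 to this CPU's ID
	while( !__sync_bool_compare_and_swap(&Lock->Lock, 0, cpu) )
		;
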
@@ -149,13 +106,6 @@ void SHORTLOCK(struct sShortSpinlock *Lock)
  */
 void SHORTREL(struct sShortSpinlock *Lock)
 {      
-       #if STACKED_LOCKS
-       if( Lock->Depth ) {
-               Lock->Depth --;
-               return ;
-       }
-       #endif
-       
        #if TRACE_LOCKS
        if( Lock != &glDebug_Lock && Lock != &glThreadListLock )
        {
@@ -164,7 +114,6 @@ void SHORTREL(struct sShortSpinlock *Lock)
        }
        #endif
        
-       #if LOCK_DISABLE_INTS
        // Lock->IF can change anytime once Lock->Lock is zeroed
        if(Lock->IF) {
                Lock->Lock = 0;
@@ -173,9 +122,6 @@ void SHORTREL(struct sShortSpinlock *Lock)
        else {
                Lock->Lock = 0;
        }
-       #else
-       Lock->Lock = 0;
-       #endif
 }
 
 // === DEBUG IO ===
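The comment above is the key ordering constraint in SHORTREL: the instant Lock->Lock is zeroed, another CPU can acquire the lock and overwrite Lock->IF, so the saved flag must be read before the release. The same path with the read hoisted into a local makes the ordering explicit (a sketch equivalent to the if/else above):

	void SHORTREL(struct sShortSpinlock *Lock)
	{
		 int	IF = Lock->IF;	// Read before releasing; Lock->IF may change afterwards
		Lock->Lock = 0;	// Release the lock
		if( IF )
			__ASM__ ("sti");	// Restore interrupts if the holder had them enabled
	}
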
@@ -324,11 +270,46 @@ int memcmp(const void *m1, const void *m2, size_t Num)
  */
 void *memcpy(void *Dest, const void *Src, size_t Num)
 {
-       if( ((Uint)Dest & 3) || ((Uint)Src & 3) ) {
-               __asm__ __volatile__ ("rep movsb" :: "D" (Dest), "S" (Src), "c" (Num));
+       tVAddr  dst = (tVAddr)Dest;
+       tVAddr  src = (tVAddr)Src;
+       if( (dst & 3) != (src & 3) )
+       {
+               __asm__ __volatile__ ("rep movsb" :: "D" (dst), "S" (src), "c" (Num));
 //             Debug("\nmemcpy:Num=0x%x by %p (UA)", Num, __builtin_return_address(0));
        }
-       else {
+       #if 1
+       else if( Num > 128 && (dst & 15) == (src & 15) )
+       {
+       char    tmp[16+15];     // Note: hack to save/restore xmm0 (aligned up to a 16-byte boundary below)
+                int    count = 16 - (dst & 15);
+//             Debug("\nmemcpy:Num=0x%x by %p (SSE)", Num, __builtin_return_address(0));
+               if( count < 16 )
+               {
+                       Num -= count;
+                       __asm__ __volatile__ ("rep movsb" : "=D"(dst),"=S"(src): "0"(dst), "1"(src), "c"(count));
+               }
+               
+               count = Num / 16;
+               __asm__ __volatile__ (
+                       "movdqa %%xmm0, 0(%5);\n\t"     // Save the caller's xmm0 into the aligned tmp slot
+                       "1:\n\t"
+                       "movdqa 0(%1), %%xmm0;\n\t"     // Aligned 16-byte load from source
+                       "movdqa %%xmm0, 0(%0);\n\t"     // Aligned 16-byte store to destination
+                       "add $16,%0;\n\t"
+                       "add $16,%1;\n\t"
+                       "loop 1b;\n\t"
+                       "movdqa 0(%5), %%xmm0;\n\t"     // Restore the caller's xmm0
+                       : "=r"(dst),"=r"(src)
+                       : "0"(dst), "1"(src), "c"(count), "r" (((tVAddr)tmp+15)&~15)
+                       );
+
+               count = Num & 15;
+               if(count)
+                       __asm__ __volatile__ ("rep movsb" :: "D"(dst), "S"(src), "c"(count));
+       }
+       #endif
+       else
+       {
 //             Debug("\nmemcpy:Num=0x%x by %p", Num, __builtin_return_address(0));
                __asm__ __volatile__ (
                        "rep movsl;\n\t"
@@ -338,6 +319,7 @@ void *memcpy(void *Dest, const void *Src, size_t Num)
        }
        return Dest;
 }
+
 /**
  * \fn void *memcpyd(void *Dest, const void *Src, size_t Num)
  * \brief Copy \a Num DWORDs from \a Src to \a Dest
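The new SSE path splits the copy into three phases: a byte-wise head up to the next 16-byte boundary, an aligned 16-bytes-per-iteration body through xmm0, and a byte-wise tail for the remaining Num & 15 bytes. It only triggers when Dest and Src share the same offset within a 16-byte paragraph, since movdqa faults on unaligned addresses. A userland sketch of the same scheme with SSE2 intrinsics (memcpy_sse is a hypothetical name for illustration; the kernel must use raw movdqa and save xmm0 by hand because it cannot rely on compiler SSE code generation):

	#include <emmintrin.h>
	#include <stdint.h>
	#include <string.h>

	// Assumes ((uintptr_t)Dest & 15) == ((uintptr_t)Src & 15), like the kernel path
	void *memcpy_sse(void *Dest, const void *Src, size_t Num)
	{
		uint8_t	*d = Dest;
		const uint8_t	*s = Src;
		// Head: byte copy up to the next 16-byte boundary
		size_t	head = (16 - ((uintptr_t)d & 15)) & 15;
		if( head > Num )	head = Num;
		memcpy(d, s, head);
		d += head;	s += head;	Num -= head;
		// Body: one aligned 16-byte load/store per iteration
		for( size_t i = 0; i < Num / 16; i ++, d += 16, s += 16 )
			_mm_store_si128((__m128i *)d, _mm_load_si128((const __m128i *)s));
		// Tail: the remaining Num & 15 bytes
		memcpy(d, s, Num & 15);
		return Dest;
	}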
