X-Git-Url: https://git.ucc.asn.au/?a=blobdiff_plain;f=Kernel%2Farch%2Fx86%2Flib.c;h=0471deba284fbbaabe830e0339ad51609e3b2ba2;hb=c5de7b08d4a82908a5acf6454fd7836bdab68715;hp=969332b09e810a2f919c42b12788d108dc4eb6ee;hpb=54bf151b1a05b74debdb5f3baec02c18406b74d1;p=tpg%2Facess2.git

diff --git a/Kernel/arch/x86/lib.c b/Kernel/arch/x86/lib.c
index 969332b0..0471deba 100644
--- a/Kernel/arch/x86/lib.c
+++ b/Kernel/arch/x86/lib.c
@@ -1,6 +1,8 @@
 /*
- * AcessOS Microkernel Version
- * lib.c
+ * Acess2
+ *
+ * arch/x86/lib.c
+ * - General arch-specific stuff
  */
 #include 
 #include 
@@ -15,6 +17,7 @@
 // === IMPRORTS ===
 #if TRACE_LOCKS
 extern struct sShortSpinlock glDebug_Lock;
+extern struct sShortSpinlock glThreadListLock;
 #endif

 extern int GetCPUNum(void);
@@ -42,13 +45,7 @@ int IS_LOCKED(struct sShortSpinlock *Lock)
  */
 int CPU_HAS_LOCK(struct sShortSpinlock *Lock)
 {
-	#if STACKED_LOCKS == 1
 	return Lock->Lock == GetCPUNum() + 1;
-	#elif STACKED_LOCKS == 2
-	return Lock->Lock == Proc_GetCurThread();
-	#else
-	return 0;
-	#endif
 }

 /**
@@ -67,67 +64,36 @@ int CPU_HAS_LOCK(struct sShortSpinlock *Lock)
 void SHORTLOCK(struct sShortSpinlock *Lock)
 {
 	int v = 1;
-	#if LOCK_DISABLE_INTS
 	int IF;
-	#endif
-	#if STACKED_LOCKS == 1
 	int cpu = GetCPUNum() + 1;
-	#elif STACKED_LOCKS == 2
-	void *thread = Proc_GetCurThread();
-	#endif

-	#if LOCK_DISABLE_INTS
 	// Save interrupt state
 	__ASM__ ("pushf;\n\tpop %0" : "=r"(IF));
 	IF &= 0x200;	// AND out all but the interrupt flag
-	#endif

-	#if STACKED_LOCKS == 1
-	if( Lock->Lock == cpu ) {
-		Lock->Depth ++;
-		return ;
-	}
-	#elif STACKED_LOCKS == 2
-	if( Lock->Lock == thread ) {
-		Lock->Depth ++;
-		return ;
+	#if TRACE_LOCKS
+	if( Lock != &glDebug_Lock && Lock != &glThreadListLock )
+	{
+		//Log_Log("LOCK", "%p locked by %p", Lock, __builtin_return_address(0));
+		Debug("%p obtaining %p (Called by %p)", __builtin_return_address(0), Lock, __builtin_return_address(1));
 	}
 	#endif

+	__ASM__("cli");
+
 	// Wait for another CPU to release
 	while(v) {
-		// CMPXCHG:
-		//  If r/m32 == EAX, set ZF and set r/m32 = r32
-		//  Else, clear ZF and set EAX = r/m32
-		#if STACKED_LOCKS == 1
-		__ASM__("lock cmpxchgl %2, (%3)"
-			: "=a"(v)
-			: "a"(0), "r"(cpu), "r"(&Lock->Lock)
-			);
-		#elif STACKED_LOCKS == 2
-		__ASM__("lock cmpxchgl %2, (%3)"
-			: "=a"(v)
-			: "a"(0), "r"(thread), "r"(&Lock->Lock)
-			);
-		#else
-		__ASM__("xchgl %%eax, (%%edi)":"=a"(v):"a"(1),"D"(&Lock->Lock));
-		#endif
-
-		#if LOCK_DISABLE_INTS
-		if( v ) __ASM__("sti");	// Re-enable interrupts
-		#endif
+		__ASM__("xchgl %%eax, (%%edi)":"=a"(v):"a"(cpu),"D"(&Lock->Lock));
 	}

-	#if LOCK_DISABLE_INTS
-	__ASM__("cli");
 	Lock->IF = IF;
-	#endif

 	#if TRACE_LOCKS
-	if( Lock != &glDebug_Lock )
+	if( Lock != &glDebug_Lock && Lock != &glThreadListLock )
 	{
 		//Log_Log("LOCK", "%p locked by %p", Lock, __builtin_return_address(0));
-		LogF("Lock %p locked by %p\n", Lock, __builtin_return_address(0));
+		//Debug("Lock %p locked by %p\t%p", Lock, __builtin_return_address(0), __builtin_return_address(1));
+		Debug("got it");
 	}
 	#endif
 }
@@ -137,22 +103,14 @@ void SHORTLOCK(struct sShortSpinlock *Lock)
  */
 void SHORTREL(struct sShortSpinlock *Lock)
 {
-	#if STACKED_LOCKS
-	if( Lock->Depth ) {
-		Lock->Depth --;
-		return ;
-	}
-	#endif
-
 	#if TRACE_LOCKS
-	if( Lock != &glDebug_Lock )
+	if( Lock != &glDebug_Lock && Lock != &glThreadListLock )
 	{
 		//Log_Log("LOCK", "%p released by %p", Lock, __builtin_return_address(0));
-		LogF("Lock %p released by %p\n", Lock, __builtin_return_address(0));
+		Debug("Lock %p released by %p\t%p", Lock, __builtin_return_address(0), __builtin_return_address(1));
 	}
 	#endif

-	#if LOCK_DISABLE_INTS
 	// Lock->IF can change anytime once Lock->Lock is zeroed
 	if(Lock->IF) {
 		Lock->Lock = 0;
@@ -161,9 +119,6 @@ void SHORTREL(struct sShortSpinlock *Lock)
 	else {
 		Lock->Lock = 0;
 	}
-	#else
-	Lock->Lock = 0;
-	#endif
 }

 // === DEBUG IO ===
@@ -178,7 +133,7 @@ int putDebugChar(char ch)
 		outb(GDB_SERIAL_PORT + 3, 0x03);	// 8 bits, no parity, one stop bit (8N1)
 		outb(GDB_SERIAL_PORT + 2, 0xC7);	// Enable FIFO with 14-byte threshold and clear it
 		outb(GDB_SERIAL_PORT + 4, 0x0B);	// IRQs enabled, RTS/DSR set
-		gbDebug_SerialSetup = 1;
+		gbGDB_SerialSetup = 1;
 	}
 	while( (inb(GDB_SERIAL_PORT + 5) & 0x20) == 0 );
 	outb(GDB_SERIAL_PORT, ch);
@@ -194,7 +149,7 @@ int getDebugChar(void)
 		outb(GDB_SERIAL_PORT + 3, 0x03);	// 8 bits, no parity, one stop bit
 		outb(GDB_SERIAL_PORT + 2, 0xC7);	// Enable FIFO with 14-byte threshold and clear it
 		outb(GDB_SERIAL_PORT + 4, 0x0B);	// IRQs enabled, RTS/DSR set
-		gbDebug_SerialSetup = 1;
+		gbGDB_SerialSetup = 1;
 	}
 	while( (inb(GDB_SERIAL_PORT + 5) & 1) == 0)	;
 	return inb(GDB_SERIAL_PORT);
@@ -312,9 +267,47 @@ int memcmp(const void *m1, const void *m2, size_t Num)
  */
 void *memcpy(void *Dest, const void *Src, size_t Num)
 {
-	if( ((Uint)Dest & 3) || ((Uint)Src & 3) )
-		__asm__ __volatile__ ("rep movsb" :: "D" (Dest), "S" (Src), "c" (Num));
-	else {
+	tVAddr dst = (tVAddr)Dest;
+	tVAddr src = (tVAddr)Src;
+	if( (dst & 3) != (src & 3) )
+	{
+		__asm__ __volatile__ ("rep movsb" :: "D" (dst), "S" (src), "c" (Num));
+//		Debug("\nmemcpy:Num=0x%x by %p (UA)", Num, __builtin_return_address(0));
+	}
+	#if 1
+	else if( Num > 128 && (dst & 15) == (src & 15) )
+	{
+		char tmp[16+15];	// Note, this is a hack to save/restor xmm0
+		int count = 16 - (dst & 15);
+//		Debug("\nmemcpy:Num=0x%x by %p (SSE)", Num, __builtin_return_address(0));
+		if( count < 16 )
+		{
+			Num -= count;
+			__asm__ __volatile__ ("rep movsb" : "=D"(dst),"=S"(src): "0"(dst), "1"(src), "c"(count));
+		}
+
+		count = Num / 16;
+		__asm__ __volatile__ (
+			"movdqa 0(%5), %%xmm0;\n\t"
+			"1:\n\t"
+			"movdqa 0(%1), %%xmm0;\n\t"
+			"movdqa %%xmm0, 0(%0);\n\t"
+			"add $16,%0;\n\t"
+			"add $16,%1;\n\t"
+			"loop 1b;\n\t"
+			"movdqa %%xmm0, 0(%5);\n\t"
+			: "=r"(dst),"=r"(src)
+			: "0"(dst), "1"(src), "c"(count), "r" (((tVAddr)tmp+15)&~15)
+			);
+
+		count = Num & 15;
+		if(count)
+			__asm__ __volatile__ ("rep movsb" :: "D"(dst), "S"(src), "c"(count));
+	}
+	#endif
+	else
+	{
+//		Debug("\nmemcpy:Num=0x%x by %p", Num, __builtin_return_address(0));
 		__asm__ __volatile__ (
 			"rep movsl;\n\t"
 			"mov %3, %%ecx;\n\t"
@@ -323,6 +316,7 @@ void *memcpy(void *Dest, const void *Src, size_t Num)
 	}
 	return Dest;
 }
+
 /**
  * \fn void *memcpyd(void *Dest, const void *Src, size_t Num)
  * \brief Copy \a Num DWORDs from \a Src to \a Dest
@@ -333,6 +327,25 @@ void *memcpyd(void *Dest, const void *Src, size_t Num)
 	return Dest;
 }

+Uint64 DivMod64U(Uint64 Num, Uint64 Div, Uint64 *Rem)
+{
+	Uint64 ret;
+	if( Div < 0x100000000ULL && Num < 0xFFFFFFFF * Div ) {
+		Uint32 rem, ret_32;
+		__asm__ __volatile__(
+			"div %4"
+			: "=a" (ret_32), "=d" (rem)
+			: "a" ( (Uint32)(Num & 0xFFFFFFFF) ), "d" ((Uint32)(Num >> 32)), "r" (Div)
+			);
+		if(Rem)	*Rem = rem;
+		return ret_32;
+	}
+
+	ret = __udivdi3(Num, Div);
+	if(Rem)	*Rem = __umoddi3(Num, Div);
+	return ret;
+}
+
 /**
  * \fn Uint64 __udivdi3(Uint64 Num, Uint64 Den)
  * \brief Divide two 64-bit integers
@@ -424,22 +437,6 @@ Uint64 __umoddi3(Uint64 Num, Uint64 Den)
 	return Num - __udivdi3(Num, Den) * Den;
 }

-Uint16 LittleEndian16(Uint16 Val)
-{
-	return Val;
-}
-Uint16 BigEndian16(Uint16 Val)
-{
-	return ((Val&0xFF)<<8) | ((Val>>8)&0xFF);
-}
-Uint32 LittleEndian32(Uint32 Val)
-{
-	return Val;
-}
-Uint32 BigEndian32(Uint32 Val)
-{
-	return ((Val&0xFF)<<24) | ((Val&0xFF00)<<8) | ((Val>>8)&0xFF00) | ((Val>>24)&0xFF);
-}

 // --- EXPORTS ---
 EXPORT(memcpy);	EXPORT(memset);
@@ -450,9 +447,6 @@ EXPORT(inb);	EXPORT(inw);	EXPORT(ind);
 EXPORT(outb);	EXPORT(outw);	EXPORT(outd);
 EXPORT(__udivdi3);	EXPORT(__umoddi3);
-EXPORT(LittleEndian16);	EXPORT(BigEndian16);
-EXPORT(LittleEndian32);	EXPORT(BigEndian32);
-
 EXPORT(SHORTLOCK);
 EXPORT(SHORTREL);
 EXPORT(IS_LOCKED);
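
Notes on the main changes above, with illustrative sketches (editorial additions, not part of the commit):

1) Spinlock simplification. The STACKED_LOCKS and LOCK_DISABLE_INTS build options are gone: SHORTLOCK now always saves EFLAGS.IF, disables interrupts, and spins on a plain xchgl that writes GetCPUNum()+1 into the lock word, and CPU_HAS_LOCK reduces to a single comparison against that tag. The sketch below restates the resulting protocol in standalone C; only the Lock and IF fields come from the diff, and the struct and function names are illustrative, not the kernel's real definitions.

/* Sketch only: mirrors the post-patch SHORTLOCK/SHORTREL logic.  The real
 * sShortSpinlock lives in the arch headers; this copy has just the two fields
 * the diff touches. */
struct sketch_spinlock {
	volatile int	Lock;	// 0 = free, otherwise GetCPUNum()+1 of the holder
	int	IF;	// EFLAGS.IF (bit 9) of the holder, restored on release
};

static void sketch_SHORTLOCK(struct sketch_spinlock *Lock, int cpu_plus_one)
{
	int	v = 1;
	int	IF;

	// Save the caller's interrupt flag, then run the whole critical section
	// with interrupts off (the patch issues an unconditional "cli").
	__asm__ __volatile__ ("pushf\n\tpop %0" : "=r"(IF));
	IF &= 0x200;
	__asm__ __volatile__ ("cli");

	// xchg with a memory operand is implicitly LOCKed; only reading back zero
	// means the lock was actually free.  ("memory" is a compiler barrier added
	// for the sketch; the original asm does not list it.)
	while( v )
		__asm__ __volatile__ ("xchgl %0, %1"
			: "=r"(v), "+m"(Lock->Lock)
			: "0"(cpu_plus_one)
			: "memory");

	Lock->IF = IF;	// Only safe to record once the lock is owned
}

static void sketch_SHORTREL(struct sketch_spinlock *Lock)
{
	// Lock->IF can be overwritten by the next owner as soon as Lock is zeroed,
	// so it has to be examined before the releasing store - same ordering as
	// the patched SHORTREL.
	if( Lock->IF ) {
		Lock->Lock = 0;
		__asm__ __volatile__ ("sti" ::: "memory");
	}
	else {
		Lock->Lock = 0;
	}
}

One consequence of spinning with xchgl rather than the old lock cmpxchgl path: a waiting CPU keeps overwriting the lock word with its own tag, so the owner recorded in Lock->Lock (and therefore CPU_HAS_LOCK) is only reliable while the lock is uncontended. IS_LOCKED is unaffected, since any non-zero value still means "held".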
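
2) GDB stub serial flag. putDebugChar() and getDebugChar() now set gbGDB_SerialSetup instead of gbDebug_SerialSetup, presumably so the stub records its one-time port initialisation in its own flag rather than in the debug channel's; the guard that tests the flag sits outside the shown context. For reference, the writes visible in these hunks are a standard 16550 UART programming sequence. The sketch below spells the full sequence out; the interrupt-mask and baud-divisor writes are not visible in the diff and are assumptions.

/* Sketch: 8N1 setup matching the register writes shown in the hunks
 * (LCR=0x03, FCR=0xC7, MCR=0x0B).  Lines marked "assumed" are the usual
 * companions of that sequence, not something this diff shows. */
static void sketch_serial_init_8n1(Uint16 Port)
{
	outb(Port + 1, 0x00);	// Assumed: mask UART interrupts during setup
	outb(Port + 3, 0x80);	// Assumed: set DLAB to reach the divisor latch
	outb(Port + 0, 0x03);	// Assumed: divisor low byte (115200/3 = 38400 baud)
	outb(Port + 1, 0x00);	// Assumed: divisor high byte
	outb(Port + 3, 0x03);	// 8 data bits, no parity, one stop bit (as in the diff)
	outb(Port + 2, 0xC7);	// Enable+clear FIFOs, 14-byte threshold (as in the diff)
	outb(Port + 4, 0x0B);	// DTR | RTS | OUT2; OUT2 gates the UART IRQ line (as in the diff)
}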
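
3) SSE fast path in memcpy(). The new branch handles copies over 128 bytes where both pointers share the same 16-byte phase: it byte-copies up to the next 16-byte boundary, streams whole 16-byte blocks through %xmm0 with movdqa, then finishes the tail with rep movsb. Two hedged observations, since only this file is visible: whether the kernel may touch SSE registers here at all depends on how FPU/SSE state (CR0.TS, CR4.OSFXSR) is managed elsewhere; and the save/restore of %xmm0 through tmp looks inverted, in that the first movdqa loads the uninitialised spill slot into %xmm0 and the last one stores the final copied block into it, so the caller's %xmm0 ends up clobbered rather than preserved. The sketch below shows the same loop with the spill and reload swapped, assuming preservation was the intent.

/* Sketch only: 16-byte-aligned block copy with the %xmm0 spill/reload in the
 * order the "save/restor xmm0" comment implies (store before the loop, load
 * back after it).  Both pointers must be 16-byte aligned and blocks > 0. */
static void sketch_copy_blocks16(void *dst, const void *src, unsigned long blocks)
{
	char	tmp[16+15];
	char	*spill = (char*)(((unsigned long)tmp + 15) & ~15UL);	// 16-byte aligned slot

	__asm__ __volatile__ (
		"movdqa %%xmm0, 0(%3)\n\t"	// save the caller's %xmm0 first
		"1:\n\t"
		"movdqa 0(%1), %%xmm0\n\t"
		"movdqa %%xmm0, 0(%0)\n\t"
		"add $16, %0\n\t"
		"add $16, %1\n\t"
		"loop 1b\n\t"
		"movdqa 0(%3), %%xmm0\n\t"	// restore it once the copy is done
		: "+r"(dst), "+r"(src), "+c"(blocks)
		: "r"(spill)
		: "memory");
}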
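
4) DivMod64U(). The fast path issues one 32-bit div with the dividend in EDX:EAX; that instruction faults (#DE) if the quotient does not fit in EAX, which is exactly what the guard rules out: Div must fit in 32 bits, and Num < 0xFFFFFFFF * Div keeps the quotient strictly below 0xFFFFFFFF. Everything else falls back to the __udivdi3/__umoddi3 helpers defined further down in this file. A plain-C restatement of the same decision follows, using the Uint32/Uint64 types this file already uses; the _ref name and the example caller are illustrative only.

/* Sketch: C-only equivalent of DivMod64U's fast-path test (the real function
 * performs the fast path with a single 32-bit "div" instruction). */
Uint64 DivMod64U_ref(Uint64 Num, Uint64 Div, Uint64 *Rem)
{
	if( Div < 0x100000000ULL && Num < 0xFFFFFFFF * Div )
	{
		// Quotient is below 0xFFFFFFFF, so a 64/32 divide cannot overflow EAX
		Uint32	q = (Uint32)(Num / Div);
		if(Rem)	*Rem = Num - (Uint64)q * Div;
		return q;
	}
	// Slow path: full 64-bit division via the helpers in this file
	if(Rem)	*Rem = __umoddi3(Num, Div);
	return __udivdi3(Num, Div);
}

// Illustrative caller: split a 64-bit byte offset into a sector and an offset
//	Uint64	ofs;
//	Uint64	sector = DivMod64U(byte_offset, 512, &ofs);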