X-Git-Url: https://git.ucc.asn.au/?a=blobdiff_plain;f=Kernel%2Farch%2Fx86%2Finclude%2Farch.h;h=eff85fc2af4363799aab3c158f05f34bd908d24f;hb=de2ae10743172075f2d527780bdfd890ccddb8e7;hp=773556840a9dca2ca272d5fc2884ef3fff67a422;hpb=a506fc15c09f7d8f178a7c7d9658b5bf45778128;p=tpg%2Facess2.git
diff --git a/Kernel/arch/x86/include/arch.h b/Kernel/arch/x86/include/arch.h
index 77355684..eff85fc2 100644
--- a/Kernel/arch/x86/include/arch.h
+++ b/Kernel/arch/x86/include/arch.h
@@ -29,44 +29,71 @@
 #define __ASM__	__asm__ __volatile__
 
-// === MACROS ===
-typedef volatile int	tSpinlock;
-#define IS_LOCKED(lockptr)	(!!(*(tSpinlock*)lockptr))
+// === Spinlocks ===
 /**
- * \brief Inter-Process interrupt (does a Yield)
+ * \brief Short Spinlock structure
  */
-#define LOCK(lockptr)	do {\
-	int v=1;\
-	while(v) {\
-		__ASM__("xchgl %%eax, (%%edi)":"=a"(v):"a"(1),"D"(lockptr));\
-		if(v)	Threads_Yield();\
-	}\
-}while(0)
+struct sShortSpinlock {
+	volatile int	Lock;	//!< Lock value
+	 int	IF;	//!< Interrupt state on call to SHORTLOCK
+};
 /**
- * \brief Tight spinlock (does a HLT)
+ * \brief Determine if a short spinlock is locked
+ * \param Lock	Lock pointer
  */
-#define TIGHTLOCK(lockptr)	do{\
-	int v=1;\
-	while(v) {\
-		__ASM__("xchgl %%eax,(%%edi)":"=a"(v):"a"(1),"D"(lockptr));\
-		if(v)	__ASM__("hlt");\
-	}\
-}while(0)
+static inline int IS_LOCKED(struct sShortSpinlock *Lock) {
+	return !!Lock->Lock;
+}
 /**
- * \brief Very Tight spinlock (short inter-cpu lock)
+ * \brief Acquire a Short Spinlock
+ * \param Lock	Lock pointer
+ *
+ * This type of lock should only be used for very short sections of code,
+ * or in places where a Mutex_* would be overkill, such as appending
+ * an element to a linked list (usually two assignment lines in C)
+ *
+ * \note This type of lock disables interrupts, so ensure that no timing
+ * functions are called while it is held.
*/ -#define VTIGHTLOCK(lockptr) do{\ - int v=1;\ - while(v)__ASM__("xchgl %%eax,(%%edi)":"=a"(v):"a"(1),"D"(lockptr));\ -}while(0) +static inline void SHORTLOCK(struct sShortSpinlock *Lock) { + int v = 1; + int IF; + // int cpu = GetCPUNum() + 1; + + // Save interrupt state and clear interrupts + __ASM__ ("pushf;\n\tcli;\n\tpop %%eax" : "=a"(IF)); + IF &= 0x200; + + // Wait for another CPU to release + while(v) + __ASM__("xchgl %%eax, (%%edi)":"=a"(v):"a"(1),"D"(&Lock->Lock)); + + Lock->IF = IF; +} /** - * \brief Release a held spinlock + * \brief Release a short lock + * \param Lock Lock pointer */ -#define RELEASE(lockptr) __ASM__("lock andl $0, (%%edi)"::"D"(lockptr)); +static inline void SHORTREL(struct sShortSpinlock *Lock) { + // Lock->IF can change anytime once Lock->Lock is zeroed + if(Lock->IF) { + Lock->Lock = 0; + __ASM__ ("sti"); + } + else { + Lock->Lock = 0; + } +} + +// === MACROS === /** * \brief Halt the CPU */ #define HALT() __asm__ __volatile__ ("hlt") +/** + * \brief Fire a magic breakpoint (bochs) + */ +#define MAGIC_BREAK() __asm__ __volatile__ ("xchg %bx, %bx") // === TYPES === typedef unsigned int Uint; // Unsigned machine native integer