X-Git-Url: https://git.ucc.asn.au/?a=blobdiff_plain;f=Kernel%2Farch%2Fx86%2Finclude%2Farch.h;h=f9813ed5aceaa348ce177bfca683d5acdd7c19ea;hb=9d85201216cb35e1b1e051b1d7cdc38eaa5befa4;hp=eff85fc2af4363799aab3c158f05f34bd908d24f;hpb=de2ae10743172075f2d527780bdfd890ccddb8e7;p=tpg%2Facess2.git

diff --git a/Kernel/arch/x86/include/arch.h b/Kernel/arch/x86/include/arch.h
index eff85fc2..f9813ed5 100644
--- a/Kernel/arch/x86/include/arch.h
+++ b/Kernel/arch/x86/include/arch.h
@@ -10,6 +10,12 @@
 #define KERNEL_BASE	0xC0000000
 #define BITS	32
 
+#define INVLPTR	((void*)-1)
+
+// Allow nested spinlocks?
+#define STACKED_LOCKS	2	// 0: No, 1: Per-CPU, 2: Per-Thread
+#define LOCK_DISABLE_INTS	1
+
 // - Processor/Machine Specific Features
 #if ARCH != i386 && ARCH != i486 && ARCH != i586
 # error "Unknown architecture '" #ARCH "'"
@@ -34,60 +40,23 @@
  * \brief Short Spinlock structure
  */
 struct sShortSpinlock {
+	#if STACKED_LOCKS == 2
+	volatile void	*Lock;	//!< Lock value
+	#else
 	volatile int	Lock;	//!< Lock value
+	#endif
+	
+	#if LOCK_DISABLE_INTS
 	 int	IF;	//!< Interrupt state on call to SHORTLOCK
+	#endif
+	#if STACKED_LOCKS
+	 int	Depth;
+	#endif
 };
-/**
- * \brief Determine if a short spinlock is locked
- * \param Lock	Lock pointer
- */
-static inline int IS_LOCKED(struct sShortSpinlock *Lock) {
-	return !!Lock->Lock;
-}
-/**
- * \brief Acquire a Short Spinlock
- * \param Lock	Lock pointer
- *
- * This type of mutex should only be used for very short sections of code,
- * or in places where a Mutex_* would be overkill, such as appending
- * an element to linked list (usually two assignement lines in C)
- *
- * \note This type of lock halts interrupts, so ensure that no timing
- * functions are called while it is held.
- */
-static inline void SHORTLOCK(struct sShortSpinlock *Lock) {
-	 int	v = 1;
-	 int	IF;
-	// int	cpu = GetCPUNum() + 1;
-	
-	// Save interrupt state and clear interrupts
-	__ASM__ ("pushf;\n\tcli;\n\tpop %%eax" : "=a"(IF));
-	IF &= 0x200;
-	
-	// Wait for another CPU to release
-	while(v)
-		__ASM__("xchgl %%eax, (%%edi)":"=a"(v):"a"(1),"D"(&Lock->Lock));
-	
-	Lock->IF = IF;
-}
-/**
- * \brief Release a short lock
- * \param Lock	Lock pointer
- */
-static inline void SHORTREL(struct sShortSpinlock *Lock) {
-	// Lock->IF can change anytime once Lock->Lock is zeroed
-	if(Lock->IF) {
-		Lock->Lock = 0;
-		__ASM__ ("sti");
-	}
-	else {
-		Lock->Lock = 0;
-	}
-}
 
 // === MACROS ===
 /**
- * \brief Halt the CPU
+ * \brief Halt the CPU (shorter version of yield)
  */
 #define	HALT()	__asm__ __volatile__ ("hlt")
 /**
@@ -107,16 +76,17 @@
 typedef signed short	Sint16;
 typedef signed long	Sint32;
 typedef signed long long	Sint64;
 typedef Uint	size_t;
+typedef char	BOOL;
 
 typedef Uint64	tPAddr;
 typedef Uint32	tVAddr;
 
 typedef struct {
-	Uint	gs, fs, es, ds;
-	Uint	edi, esi, ebp, kesp;
+	Uint	gs, fs, es, ds;
+	Uint	edi, esi, ebp, kesp;
 	Uint	ebx, edx, ecx, eax;
-	Uint	int_num, err_code;
-	Uint	eip, cs;
+	Uint	int_num, err_code;
+	Uint	eip, cs;
 	Uint	eflags, esp, ss;
 } tRegs;
@@ -155,4 +125,10 @@
 	Uint	EIP, ESP, EBP;
 } tTaskState;
 
+// === FUNCTIONS ===
+extern int	IS_LOCKED(struct sShortSpinlock *Lock);
+extern int	CPU_HAS_LOCK(struct sShortSpinlock *Lock);
+extern void	SHORTLOCK(struct sShortSpinlock *Lock);
+extern void	SHORTREL(struct sShortSpinlock *Lock);
+
 #endif	// !defined(_ARCH_H_)
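
For reference, the short-spinlock API that this change turns into extern functions (see the new FUNCTIONS block) is, per the removed doxygen comment, intended only for very short critical sections such as a couple of pointer assignments on a linked list. A minimal usage sketch follows; the node type, globals and AddNode() are hypothetical examples, and only struct sShortSpinlock, SHORTLOCK() and SHORTREL() come from arch.h:

/*
 * Usage sketch only: tExampleNode, gExampleLock, gpExampleList and
 * AddNode() are hypothetical; struct sShortSpinlock, SHORTLOCK() and
 * SHORTREL() are the arch.h API declared in the diff above.
 */
typedef struct sExampleNode {
	struct sExampleNode	*Next;
	 int	Value;
} tExampleNode;

struct sShortSpinlock	gExampleLock;	// Protects gpExampleList
tExampleNode	*gpExampleList;

void AddNode(tExampleNode *Node)
{
	// Keep the locked region to the two pointer assignments,
	// as the (removed) SHORTLOCK documentation advises
	SHORTLOCK( &gExampleLock );
	Node->Next = gpExampleList;
	gpExampleList = Node;
	SHORTREL( &gExampleLock );
}

Because LOCK_DISABLE_INTS is enabled, interrupts stay disabled on the holding CPU for the whole locked region, so nothing lengthy (and nothing that depends on timer interrupts) should run between SHORTLOCK() and SHORTREL().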