X-Git-Url: https://git.ucc.asn.au/?a=blobdiff_plain;f=Kernel%2Farch%2Fx86%2Finclude%2Farch.h;h=755ecdfdb7877a59cf32b24e089ae25a2158f7df;hb=b68b764267d33a45539b4c910db13fbdae48f193;hp=ede3bf059ce5b2904572a2bf0e2d08a5bc56cdfd;hpb=2ae2035466cfa7816079a067ccee25e2b8bac6bc;p=tpg%2Facess2.git

diff --git a/Kernel/arch/x86/include/arch.h b/Kernel/arch/x86/include/arch.h
index ede3bf05..755ecdfd 100644
--- a/Kernel/arch/x86/include/arch.h
+++ b/Kernel/arch/x86/include/arch.h
@@ -9,6 +9,13 @@
 // - Base Defintions
 #define KERNEL_BASE 0xC0000000
 #define BITS 32
+#define PAGE_SIZE 0x1000
+
+#define INVLPTR ((void*)-1)
+
+// Allow nested spinlocks?
+#define STACKED_LOCKS 2	// 0: No, 1: Per-CPU, 2: Per-Thread
+#define LOCK_DISABLE_INTS 1
 
 // - Processor/Machine Specific Features
 #if ARCH != i386 && ARCH != i486 && ARCH != i586
@@ -34,59 +41,34 @@
  * \brief Short Spinlock structure
  */
 struct sShortSpinlock {
+	#if STACKED_LOCKS == 2
+	volatile void	*Lock;	//!< Lock value
+	#else
 	volatile int	Lock;	//!< Lock value
-	 int	IF;	//!< Interrupt state on call to SHORTLOCK
-};
-/**
- * \brief Determine if a short spinlock is locked
- * \param Lock	Lock pointer
- */
-static inline int IS_LOCKED(struct sShortSpinlock *Lock) {
-	return !!Lock->Lock;
-}
-/**
- * \brief Acquire a Short Spinlock
- * \param Lock	Lock pointer
- *
- * This type of mutex should only be used for very short sections of code,
- * or in places where a Mutex_* would be overkill, such as appending
- * an element to linked list (usually two assignement lines in C)
- *
- * \note This type of lock halts interrupts, so ensure that no timing
- * functions are called while it is held.
- */
-static inline void SHORTLOCK(struct sShortSpinlock *Lock) {
-	 int	v = 1;
-	
-	// Save interrupt state
-	__ASM__ ("pushf;\n\tpop %%eax" : "=a"(Lock->IF));
-	Lock->IF &= 0x200;
-	
-	// Stop interrupts
-	__ASM__ ("cli");
+	#endif
 	
-	// Wait for another CPU to release
-	while(v)
-		__ASM__("xchgl %%eax, (%%edi)":"=a"(v):"a"(1),"D"(&Lock->Lock));
-}
-/**
- * \brief Release a short lock
- * \param Lock	Lock pointer
- */
-static inline void SHORTREL(struct sShortSpinlock *Lock) {
-	Lock->Lock = 0;
-	#if 0	// Which is faster?, meh the test is simpler
-	__ASM__ ("pushf;\n\tor %0, (%%esp);\n\tpopf" : : "a"(Lock->IF));
-	#else
-	if(Lock->IF)	__ASM__ ("sti");
+	#if LOCK_DISABLE_INTS
+	 int	IF;	//!< Interrupt state on call to SHORTLOCK
+	#endif
+	#if STACKED_LOCKS
+	 int	Depth;
 	#endif
-}
+};
 
 // === MACROS ===
 /**
- * \brief Halt the CPU
+ * \brief Halt the CPU (shorter version of yield)
  */
+#if 1
+#define HALT()	do { \
+	Uint32	flags; \
+	__asm__ __volatile__ ("pushf;pop %0" : "=a"(flags)); \
+	if( !(flags & 0x200) )	Panic("HALT called with interrupts disabled"); \
+	__asm__ __volatile__ ("hlt"); \
+} while(0)
+#else
 #define HALT()	__asm__ __volatile__ ("hlt")
+#endif
 /**
  * \brief Fire a magic breakpoint (bochs)
  */
@@ -104,17 +86,18 @@ typedef signed short	Sint16;
 typedef signed long	Sint32;
 typedef signed long long	Sint64;
 typedef Uint	size_t;
+typedef char	BOOL;
 
-typedef Uint64	tPAddr;
+typedef Uint32	tPAddr;
 typedef Uint32	tVAddr;
 
 typedef struct {
-	Uint	gs, fs, es, ds;
-	Uint	edi, esi, ebp, kesp;
-	Uint	ebx, edx, ecx, eax;
-	Uint	int_num, err_code;
-	Uint	eip, cs;
-	Uint	eflags, esp, ss;
+	Uint32	gs, fs, es, ds;
+	Uint32	edi, esi, ebp, kesp;
+	Uint32	ebx, edx, ecx, eax;
+	Uint32	int_num, err_code;
+	Uint32	eip, cs;
+	Uint32	eflags, esp, ss;
 } tRegs;
 
 typedef struct {
@@ -150,6 +133,16 @@ typedef struct {
 
 typedef struct {
 	Uint	EIP, ESP, EBP;
+	Uint32	UserCS, UserEIP;
 } tTaskState;
 
+// === FUNCTIONS ===
+extern void	Debug_PutCharDebug(char ch);
+extern void	Debug_PutStringDebug(const char *String);
+
+extern int	IS_LOCKED(struct sShortSpinlock *Lock);
+extern int	CPU_HAS_LOCK(struct sShortSpinlock *Lock);
+extern void	SHORTLOCK(struct sShortSpinlock *Lock);
+extern void	SHORTREL(struct sShortSpinlock *Lock);
+
 #endif	// !defined(_ARCH_H_)
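
The diff removes the inline IS_LOCKED/SHORTLOCK/SHORTREL bodies and leaves only extern prototypes, so the real definitions now live elsewhere in the tree and are not shown here. The code below is a minimal sketch of how those four routines could be written against the new structure, assuming the STACKED_LOCKS == 2 and LOCK_DISABLE_INTS configuration above and a Proc_GetCurThread()-style helper that returns a unique per-thread pointer; it is not taken from the commit, and the tree's actual implementation may differ.

#include <arch.h>	// ASSUMPTION: the arch include directory is on the include path

extern void	*Proc_GetCurThread(void);	// ASSUMPTION: returns a unique pointer identifying the running thread

int IS_LOCKED(struct sShortSpinlock *Lock)
{
	return !!Lock->Lock;
}

int CPU_HAS_LOCK(struct sShortSpinlock *Lock)
{
	#if STACKED_LOCKS == 2
	// With per-thread stacking, ownership is recorded as the holder's thread pointer
	return Lock->Lock == Proc_GetCurThread();
	#else
	return 0;	// No ownership information is kept in this configuration
	#endif
}

void SHORTLOCK(struct sShortSpinlock *Lock)
{
	 int	v = 1;
	#if LOCK_DISABLE_INTS
	 int	IF;
	#endif
	
	#if STACKED_LOCKS == 2
	// Re-entry by the current owner only increases the nesting depth
	if( Lock->Lock == Proc_GetCurThread() ) {
		Lock->Depth ++;
		return ;
	}
	#endif
	
	#if LOCK_DISABLE_INTS
	// Save EFLAGS.IF, then mask interrupts for the duration of the lock
	__asm__ __volatile__ ("pushf; pop %0" : "=r"(IF));
	IF &= 0x200;
	__asm__ __volatile__ ("cli");
	#endif
	
	// Spin until the lock is free, claiming it atomically
	while( v )
	{
		#if STACKED_LOCKS == 2
		// Store our thread pointer only if the lock currently holds zero
		__asm__ __volatile__ ("lock cmpxchgl %2, (%3)"
			: "=a"(v) : "a"(0), "r"(Proc_GetCurThread()), "r"(&Lock->Lock) : "memory");
		#else
		__asm__ __volatile__ ("xchgl %%eax, (%%edi)"
			: "=a"(v) : "a"(1), "D"(&Lock->Lock) : "memory");
		#endif
	}
	
	#if LOCK_DISABLE_INTS
	Lock->IF = IF;	// Only safe to record once the lock is actually held
	#endif
}

void SHORTREL(struct sShortSpinlock *Lock)
{
	#if LOCK_DISABLE_INTS
	 int	IF;
	#endif
	
	#if STACKED_LOCKS == 2
	// Undo one level of nesting before actually releasing
	if( Lock->Depth ) {
		Lock->Depth --;
		return ;
	}
	#endif
	
	#if LOCK_DISABLE_INTS
	// Read the saved interrupt state before dropping the lock, then restore it
	IF = Lock->IF;
	Lock->Lock = 0;
	if( IF )	__asm__ __volatile__ ("sti");
	#else
	Lock->Lock = 0;
	#endif
}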
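
The documentation removed in the diff describes SHORTLOCK as intended only for very short critical sections, such as the two assignment lines of a linked-list insert. The fragment below is an illustrative usage sketch of that pattern, not code from the tree; the node type and globals exist only for the example.

#include <arch.h>	// ASSUMPTION: the arch include directory is on the include path

typedef struct sExampleNode {
	struct sExampleNode	*Next;
	 int	Value;
} tExampleNode;

tExampleNode	*gExample_List;		// Hypothetical list head
struct sShortSpinlock	glExample_List;	// Zero-initialised short lock guarding the list

// Push a node onto the head of the list under the short lock
void Example_AddNode(tExampleNode *Node)
{
	SHORTLOCK( &glExample_List );
	Node->Next = gExample_List;
	gExample_List = Node;
	SHORTREL( &glExample_List );
}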