From: John Hodge
Date: Sun, 22 Aug 2010 02:37:46 +0000 (+0800)
Subject: Fixing commenting (always nice)
X-Git-Tag: rel0.06~50
X-Git-Url: https://git.ucc.asn.au/?a=commitdiff_plain;h=2ae2035466cfa7816079a067ccee25e2b8bac6bc;p=tpg%2Facess2.git

Fixing commenting (always nice)
---

diff --git a/Kernel/arch/x86/include/arch.h b/Kernel/arch/x86/include/arch.h
index 67a62901..ede3bf05 100644
--- a/Kernel/arch/x86/include/arch.h
+++ b/Kernel/arch/x86/include/arch.h
@@ -29,47 +29,67 @@
 #define	__ASM__	__asm__ __volatile__
 
-#define LONGLOCK_NUM_THREADS	8
-
-// === MACROS ===
+// === Spinlocks ===
+/**
+ * \brief Short Spinlock structure
+ */
 struct sShortSpinlock {
-	volatile int	Lock;
-	 int	IF;
+	volatile int	Lock;	//!< Lock value
+	 int	IF;	//!< Interrupt state on call to SHORTLOCK
 };
 
 /**
  * \brief Determine if a short spinlock is locked
+ * \param Lock	Lock pointer
  */
 static inline int IS_LOCKED(struct sShortSpinlock *Lock) {
 	return !!Lock->Lock;
 }
 /**
  * \brief Acquire a Short Spinlock
- * \note Stops interrupts, so be careful
+ * \param Lock	Lock pointer
+ *
+ * This type of lock should only be used for very short sections of code,
+ * or in places where a Mutex_* would be overkill, such as appending
+ * an element to a linked list (usually two assignment lines in C).
+ *
+ * \note This type of lock halts interrupts, so ensure that no timing
+ * functions are called while it is held.
  */
 static inline void SHORTLOCK(struct sShortSpinlock *Lock) {
 	 int	v = 1;
+	
+	// Save interrupt state
 	__ASM__ ("pushf;\n\tpop %%eax" : "=a"(Lock->IF));
 	Lock->IF &= 0x200;
-	__ASM__ ("cli");	// Stop task switches
+	
+	// Stop interrupts
+	__ASM__ ("cli");
+	
+	// Wait for another CPU to release the lock
 	while(v)
 		__ASM__("xchgl %%eax, (%%edi)":"=a"(v):"a"(1),"D"(&Lock->Lock));
 }
 /**
  * \brief Release a short lock
+ * \param Lock	Lock pointer
  */
 static inline void SHORTREL(struct sShortSpinlock *Lock) {
 	Lock->Lock = 0;
-	#if 0
+	#if 0	// Which is faster? Meh, the test is simpler
 	__ASM__ ("pushf;\n\tor %0, (%%esp);\n\tpopf" : : "a"(Lock->IF));
 	#else
 	if(Lock->IF)	__ASM__ ("sti");
 	#endif
 }
+
+// === MACROS ===
 /**
  * \brief Halt the CPU
  */
 #define	HALT()	__asm__ __volatile__ ("hlt")
+/**
+ * \brief Fire a magic breakpoint (bochs)
+ */
 #define	MAGIC_BREAK()	__asm__ __volatile__ ("xchg %bx, %bx")
 
 // === TYPES ===
diff --git a/Kernel/threads.c b/Kernel/threads.c
index 8ec0e9fc..99a9add6 100644
--- a/Kernel/threads.c
+++ b/Kernel/threads.c
@@ -840,7 +840,14 @@ void Threads_SegFault(tVAddr Addr)
 }
 
 /**
- * \brief heavy mutex
+ * \brief Acquire a heavy mutex
+ * \param Mutex	Mutex to acquire
+ *
+ * This type of mutex checks if the mutex is available, and acquires it
+ * if it is. Otherwise, the current thread is added to the mutex's wait
+ * queue and the thread suspends. When the holder of the mutex completes,
+ * the oldest thread (top thread) on the queue is given the lock and
+ * restarted.
  */
 void Mutex_Acquire(tMutex *Mutex)
 {
@@ -884,7 +891,8 @@ void Mutex_Acquire(tMutex *Mutex)
 }
 
 /**
- * \brief Release a held spinlock
+ * \brief Release a held mutex
+ * \param Mutex	Mutex to release
  */
 void Mutex_Release(tMutex *Mutex)
 {
@@ -904,6 +912,10 @@ void Mutex_Release(tMutex *Mutex)
 	SHORTREL( &Mutex->Protector );
 }
 
+/**
+ * \brief Is this mutex locked?
+ * \param Mutex	Mutex pointer
+ */
  int Mutex_IsLocked(tMutex *Mutex)
 {
 	return Mutex->Owner != NULL;
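
A minimal usage sketch of the short spinlock, matching the linked-list append
case the SHORTLOCK comment describes. This is not part of the commit;
tExampleNode, gExampleList, glExampleList and Example_Append are hypothetical
names for illustration only.

// Hypothetical example: SHORTLOCK/SHORTREL around a two-assignment append
typedef struct sExampleNode {
	struct sExampleNode	*Next;	//!< Next node in the list
} tExampleNode;

tExampleNode	*gExampleList;	// Hypothetical global list head
struct sShortSpinlock	glExampleList;	// Protects gExampleList

void Example_Append(tExampleNode *Node)
{
	SHORTLOCK( &glExampleList );	// Interrupts off, spin on xchg
	// Critical section is just the two assignments, as recommended
	Node->Next = gExampleList;
	gExampleList = Node;
	SHORTREL( &glExampleList );	// Restore saved interrupt state
}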
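
By contrast, the heavy mutex suits longer operations, because a contending
thread sleeps on the mutex's wait queue instead of spinning with interrupts
disabled. Again a hedged sketch, not part of the commit; gExampleMutex and
Example_SlowOperation are hypothetical names.

tMutex	gExampleMutex;	// Hypothetical global, zero-initialised (no owner)

void Example_SlowOperation(void)
{
	Mutex_Acquire( &gExampleMutex );
	// Long or blocking work goes here; if another thread holds the
	// mutex, this thread is queued and suspended rather than spinning.
	Mutex_Release( &gExampleMutex );	// Wakes the oldest queued waiter
}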