Still some issues, but it no longer seems to crash
[tpg/acess2.git] / Kernel / arch / x86 / include / arch.h
index 0fec144..0126be3 100644 (file)
#define	KERNEL_BASE	0xC0000000	// Virtual address the kernel is mapped at
#define BITS	32	// Native word size of this architecture

// Allow nested spinlocks?
// When non-zero, sShortSpinlock stores the owning CPU's ID (+1) and a depth
// counter, letting the same CPU re-acquire a lock it already holds
// (see SHORTLOCK/SHORTREL below).
#define STACKED_LOCKS	1
// When non-zero, SHORTLOCK disables interrupts for the duration of the lock,
// saving the caller's interrupt flag so it can be restored on release.
#define LOCK_DISABLE_INTS	0
 // - Processor/Machine Specific Features
 #if ARCH != i386 && ARCH != i486 && ARCH != i586
 # error "Unknown architecture '" #ARCH "'"
  */
struct sShortSpinlock {
	volatile int	Lock;	//!< Lock value (0 = free; with STACKED_LOCKS, holder's CPU ID + 1)
	#if LOCK_DISABLE_INTS
	 int	IF;	//!< Interrupt state on call to SHORTLOCK
	#endif
	#if STACKED_LOCKS
	 int	Depth;	//!< Re-acquisition depth of the owning CPU
	#endif
};
/**
 * \brief Determine if a short spinlock is locked
 * \param Lock	Lock pointer
 * \return Non-zero if the lock is currently held by any CPU
 */
static inline int IS_LOCKED(struct sShortSpinlock *Lock) {
	return Lock->Lock != 0;
}

/**
 * \brief Check if the current CPU has the lock
 * \param Lock	Lock pointer
 * \return Non-zero if this CPU is the current holder
 */
static inline int CPU_HAS_LOCK(struct sShortSpinlock *Lock) {
	extern int	GetCPUNum(void);
	 int	self = GetCPUNum() + 1;	// Lock stores CPU number + 1 (0 means free)
	return Lock->Lock == self;
}

+
 /**
  * \brief Acquire a Short Spinlock
  * \param Lock Lock pointer
@@ -53,28 +72,64 @@ static inline int IS_LOCKED(struct sShortSpinlock *Lock) {
  * an element to a linked list (usually two assignment lines in C)
  * 
  * \note This type of lock halts interrupts, so ensure that no timing
- * functions are called while it is held.
+ * functions are called while it is held. As a matter of fact, spend as
+ * little time as possible with this lock held
  */
 static inline void SHORTLOCK(struct sShortSpinlock *Lock) {
         int    v = 1;
+       #if LOCK_DISABLE_INTS
         int    IF;
-       // int  val = GetCPUNum() + 1;
+       #endif
+       #if STACKED_LOCKS
+       extern int      GetCPUNum(void);
+        int    cpu = GetCPUNum() + 1;
+       #endif
        
+       #if LOCK_DISABLE_INTS
        // Save interrupt state and clear interrupts
-       __ASM__ ("pushf;\n\tcli;\n\tpop %%eax" : "=a"(IF));
-       IF &= 0x200;
+       __ASM__ ("pushf;\n\tpop %%eax\n\tcli" : "=a"(IF));
+       IF &= 0x200;    // AND out all but the interrupt flag
+       #endif
+       
+       #if STACKED_LOCKS
+       if( Lock->Lock == cpu ) {
+               Lock->Depth ++;
+               return ;
+       }
+       #endif
        
        // Wait for another CPU to release
-       while(v)
-               __ASM__("xchgl %%ecx, (%%edi)":"=c"(v):"a"(1),"D"(&Lock->Lock));
+       while(v) {
+               #if STACKED_LOCKS
+               // CMPXCHG:
+               //  If r/m32 == EAX, set ZF and set r/m32 = r32
+               //  Else, clear ZF and set EAX = r/m32
+               __ASM__("lock cmpxchgl %2, (%3)"
+                       : "=a"(v)
+                       : "a"(0), "r"(cpu), "r"(&Lock->Lock)
+                       );
+               #else
+               __ASM__("xchgl %%eax, (%%edi)":"=a"(v):"a"(1),"D"(&Lock->Lock));
+               #endif
+       }
        
+       #if LOCK_DISABLE_INTS
        Lock->IF = IF;
+       #endif
 }
 /**
  * \brief Release a short lock
  * \param Lock Lock pointer
  */
 static inline void SHORTREL(struct sShortSpinlock *Lock) {
+       #if STACKED_LOCKS
+       if( Lock->Depth ) {
+               Lock->Depth --;
+               return ;
+       }
+       #endif
+       
+       #if LOCK_DISABLE_INTS
        // Lock->IF can change anytime once Lock->Lock is zeroed
        if(Lock->IF) {
                Lock->Lock = 0;
@@ -83,6 +138,9 @@ static inline void SHORTREL(struct sShortSpinlock *Lock) {
        else {
                Lock->Lock = 0;
        }
+       #else
+       Lock->Lock = 0;
+       #endif
 }
 
 // === MACROS ===

UCC git Repository :: git.ucc.asn.au