[tpg/acess2.git] KernelLand/Kernel/arch/armv7/include/lock.h
/*
 * Acess2
 * ARMv7 Architecture
 *
 * lock.h - Hardware level spinlocks
 */
#ifndef _LOCK_H_
#define _LOCK_H_

// === CODE ===
struct sShortSpinlock {
	 int	Lock;
};

// --- Spinlocks ---
static inline int IS_LOCKED(struct sShortSpinlock *Lock)
{
	return !!Lock->Lock;
}

static inline int CPU_HAS_LOCK(struct sShortSpinlock *Lock)
{
	// TODO: Handle multiple CPUs
	return !!Lock->Lock;
}

static inline int SHORTLOCK(struct sShortSpinlock *Lock)
{
	#if 0
	// Copied from Linux, yes, but I know what it does now :)
	Uint	tmp;
	__asm__ __volatile__ (
	"1:	ldrex	%0, [%1]\n"	// Exclusive LOAD
	"	teq	%0, #0\n"	// Check if zero
	"	strexeq	%0, %2, [%1]\n"	// Store 1 if it was zero (clears the exclusive monitor)
	"	teqeq	%0, #0\n"	// If the lock was available, check if the store succeeded
	"	bne	1b"	// If the lock was unavailable, or the store failed, loop
		: "=&r" (tmp)	// Temp
		: "r" (&Lock->Lock), "r" (1)
		: "cc"	// Condition codes clobbered
		);
	#elif 1
	// Plain busy-wait; the test and the set are not atomic, so this is only
	// safe while a single CPU is running (see the TODO in CPU_HAS_LOCK)
	while( *(volatile int*)&Lock->Lock )	;
	Lock->Lock = 1;
	#else
	 int	v = 1;
	while( v )
		__asm__ __volatile__ (
			"swp %0, %0, [%1]"	// Swap 1 into the lock, old value into v
			: "=r" (v) : "r" (&Lock->Lock)
			: "cc"
			);
	#endif
	return 1;
}
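
// NOTE: Illustrative sketch only, not part of the original file's API.
// The same acquire loop can be expressed with GCC's atomic builtin
// __sync_lock_test_and_set, which on ARMv7 typically compiles down to an
// ldrex/strex loop. The function name below is hypothetical.
static inline int SHORTLOCK_ATOMIC_SKETCH(struct sShortSpinlock *Lock)
{
	// Atomically write 1 into the lock and fetch the previous value;
	// keep spinning until the previous value was 0 (i.e. we acquired it)
	while( __sync_lock_test_and_set(&Lock->Lock, 1) )
		;
	return 1;
}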

static inline void SHORTREL(struct sShortSpinlock *Lock)
{
	Lock->Lock = 0;
}

#endif
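
A minimal usage sketch of the SHORTLOCK/SHORTREL pair, assuming a caller elsewhere in the kernel; the function and variable names here are hypothetical:

// Hypothetical caller protecting a shared counter with the spinlock
struct sShortSpinlock	glCounterLock;	// Zero-initialised, i.e. unlocked
 int	giCounter;

void IncrementCounter(void)
{
	SHORTLOCK(&glCounterLock);	// Spin until the lock is acquired
	giCounter ++;			// Critical section
	SHORTREL(&glCounterLock);	// Release for other holders
}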
