4 * arch/i386/include/arch.h
// Base virtual address of the kernel: 3GiB/1GiB split (user space below, kernel above).
10 #define KERNEL_BASE 0xC0000000
13 // Allow nested spinlocks?
// Non-zero: a CPU may re-acquire a SHORTLOCK it already holds (see the owner check in SHORTLOCK).
14 #define STACKED_LOCKS 1
// Non-zero: SHORTLOCK masks interrupts for the duration of the hold (see #if blocks below).
15 #define LOCK_DISABLE_INTS 0
17 // - Processor/Machine Specific Features
// NOTE(review): inside #if, unrecognised identifiers evaluate to 0, so this comparison only
// behaves as intended if ARCH is defined to one of these bare tokens on the command line — confirm build flags.
18 #if ARCH != i386 && ARCH != i486 && ARCH != i586
// NOTE(review): #error does not macro-expand or stringify its operand; "#ARCH" is emitted
// literally instead of the architecture name — confirm whether that was the intent.
19 # error "Unknown architecture '" #ARCH "'"
// Shorthand for a volatile inline-assembly statement (not removed or reordered by the optimiser).
34 #define __ASM__ __asm__ __volatile__
38 * \brief Short Spinlock structure
40 struct sShortSpinlock {
// 0 = unlocked; otherwise holds (owner CPU number + 1) — see CPU_HAS_LOCK/SHORTLOCK.
41 volatile int Lock; //!< Lock value
// Saved EFLAGS.IF bit (mask 0x200) captured by SHORTLOCK, restored by SHORTREL.
// NOTE(review): presumably only present/meaningful when LOCK_DISABLE_INTS != 0 — the
// surrounding #if lines are elided from this excerpt; confirm against the full header.
43 int IF; //!< Interrupt state on call to SHORTLOCK
50 * \brief Determine if a short spinlock is locked
51 * \param Lock Lock pointer
// \return Non-zero when the lock is currently held by any CPU.
// NOTE(review): function body is elided from this excerpt (presumably tests
// Lock->Lock != 0) — confirm against the full header.
53 static inline int IS_LOCKED(struct sShortSpinlock *Lock) {
58 * \brief Check if the current CPU has the lock
59 * \param Lock Lock pointer
// \return Non-zero when this CPU is the current owner of \a Lock.
61 static inline int CPU_HAS_LOCK(struct sShortSpinlock *Lock) {
// Local declaration avoids a header dependency on the SMP support code.
62 extern int GetCPUNum(void);
// Lock stores (owner CPU number + 1) so 0 can mean "unlocked"; compare accordingly.
63 return Lock->Lock == GetCPUNum() + 1;
67 * \brief Acquire a Short Spinlock
68 * \param Lock Lock pointer
70 * This type of mutex should only be used for very short sections of code,
71 * or in places where a Mutex_* would be overkill, such as appending
72 * an element to linked list (usually two assignement lines in C)
74 * \note This type of lock halts interrupts, so ensure that no timing
75 * functions are called while it is held. As a matter of fact, spend as
76 * little time as possible with this lock held
78 static inline void SHORTLOCK(struct sShortSpinlock *Lock) {
84 extern int GetCPUNum(void);
// Encode the owner as (CPU number + 1) so that 0 remains the "unlocked" value.
85 int cpu = GetCPUNum() + 1;
89 // Save interrupt state and clear interrupts
// PUSHF pushes EFLAGS; POP into EAX captures it for the "=a" output, then CLI masks interrupts.
90 __ASM__ ("pushf;\n\tpop %%eax\n\tcli" : "=a"(IF));
91 IF &= 0x200; // AND out all but the interrupt flag
// Re-entry by the owning CPU — NOTE(review): presumably the STACKED_LOCKS path;
// the gating #if lines are elided from this excerpt, confirm.
95 if( Lock->Lock == cpu ) {
101 // Wait for another CPU to release
105 // If r/m32 == EAX, set ZF and set r/m32 = r32
106 // Else, clear ZF and set EAX = r/m32
// Atomic compare-and-swap: claim the lock (0 -> cpu) with the LOCK prefix for SMP safety.
107 __ASM__("lock cmpxchgl %2, (%3)"
109 : "a"(0), "r"(cpu), "r"(&Lock->Lock)
// Fallback path: atomic exchange, spinning until the previous value was 0 (unlocked).
// NOTE(review): this variant writes 1 rather than the owner encoding — presumably
// compiled only when the owner-CPU encoding is unused; confirm the elided #if guards.
112 __ASM__("xchgl %%eax, (%%edi)":"=a"(v):"a"(1),"D"(&Lock->Lock));
// Record the pre-lock interrupt state only when interrupt masking is enabled.
116 #if LOCK_DISABLE_INTS
121 * \brief Release a short lock
122 * \param Lock Lock pointer
// Releases \a Lock and (when LOCK_DISABLE_INTS) restores the interrupt state
// saved by SHORTLOCK. NOTE(review): most of the body is elided from this excerpt.
124 static inline void SHORTREL(struct sShortSpinlock *Lock) {
132 #if LOCK_DISABLE_INTS
133 // Lock->IF can change anytime once Lock->Lock is zeroed
// (i.e. the saved interrupt state must be read *before* clearing the lock word —
// another CPU may acquire the lock immediately and overwrite Lock->IF)
148 * \brief Halt the CPU
// HLT idles the CPU until the next interrupt arrives — used in idle loops.
150 #define HALT() __asm__ __volatile__ ("hlt")
152 * \brief Fire a magic breakpoint (bochs)
// "xchg %bx, %bx" is a no-op on real hardware; the Bochs emulator treats it
// as a debugger breakpoint when magic_break is enabled.
154 #define MAGIC_BREAK() __asm__ __volatile__ ("xchg %bx, %bx")
// Fixed-width integer aliases for 32-bit x86 (ILP32: int and long are both 32 bits).
157 typedef unsigned int Uint; // Unsigned machine native integer
158 typedef unsigned char Uint8;
159 typedef unsigned short Uint16;
160 typedef unsigned long Uint32;
161 typedef unsigned long long Uint64;
162 typedef signed int Sint; // Signed Machine Native integer
163 typedef signed char Sint8;
164 typedef signed short Sint16;
165 typedef signed long Sint32;
166 typedef signed long long Sint64;
// Physical addresses are 64-bit (allows PAE-sized physical space); virtual addresses are 32-bit.
169 typedef Uint64 tPAddr;
170 typedef Uint32 tVAddr;
// Saved register frame fields — NOTE(review): the enclosing struct declarations
// are outside this excerpt; the fields below appear to span more than one
// structure (an interrupt register frame and a syscall register frame).
// Confirm grouping against the full header.
// General-purpose registers in PUSHA order (EDI first), plus the kernel ESP slot.
174 Uint edi, esi, ebp, kesp;
175 Uint ebx, edx, ecx, eax;
// Pushed by the interrupt entry stubs: vector number and (possibly dummy) error code.
176 Uint int_num, err_code;
// CPU-pushed interrupt frame tail (ESP/SS only present on privilege-level change).
178 Uint eflags, esp, ss;
// Syscall frame: named slots aliasing the saved-register layout above.
182 Uint Resvd1[4]; // GS, FS, ES, DS
183 Uint Arg4, Arg5; // EDI, ESI
185 Uint Resvd2[1]; // Kernel ESP
192 Uint RetHi; // High 32 bits of ret
199 Uint Resvd3[5]; // Int, Err, Eip, CS, ...
200 Uint StackPointer; // ESP
201 Uint Resvd4[1]; // SS
216 #endif // !defined(_ARCH_H_)