4 * arch/i386/include/arch.h
// Virtual address at which the kernel is mapped (the 3 GiB mark of the
// 32-bit address space)
#define KERNEL_BASE 0xC0000000

// Allow nested spinlocks? (non-zero: a CPU that already holds a short
// spinlock may take it again instead of deadlocking — see SHORTLOCK)
#define STACKED_LOCKS 1
// When non-zero, SHORTLOCK/SHORTREL disable and restore interrupts
// around the critical section (guards the #if LOCK_DISABLE_INTS code)
#define LOCK_DISABLE_INTS 0
// - Processor/Machine Specific Features
// Reject builds for architectures this header does not support.
// NOTE(review): in #if, undefined identifiers evaluate to 0, so if
// neither ARCH's expansion nor i386/i486/i586 are defined macros this
// comparison is 0 != 0 and the check never fires — confirm the build
// system defines ARCH in a way that makes this test meaningful.
#if ARCH != i386 && ARCH != i486 && ARCH != i586
# error "Unknown architecture '" #ARCH "'"
34 #define __ASM__ __asm__ __volatile__
/**
 * \brief Short Spinlock structure
 */
struct sShortSpinlock {
	volatile int	Lock;	//!< Lock value (0 = free; otherwise holder's CPU number + 1 — see CPU_HAS_LOCK)
	 int	IF;	//!< Interrupt state on call to SHORTLOCK (EFLAGS.IF, bit 0x200)
/**
 * \brief Determine if a short spinlock is locked
 * \param Lock	Lock pointer
 */
static inline int IS_LOCKED(struct sShortSpinlock *Lock) {
/**
 * \brief Check if the current CPU has the lock
 * \param Lock	Lock pointer
 * \return Non-zero if \a Lock is held by the CPU this code runs on
 */
static inline int CPU_HAS_LOCK(struct sShortSpinlock *Lock) {
	extern int GetCPUNum(void);	// declared locally to avoid a header dependency
	// Lock stores (holder's CPU number + 1) so that 0 can mean "free"
	return Lock->Lock == GetCPUNum() + 1;
/**
 * \brief Acquire a Short Spinlock
 * \param Lock	Lock pointer
 *
 * This type of mutex should only be used for very short sections of code,
 * or in places where a Mutex_* would be overkill, such as appending
 * an element to linked list (usually two assignment lines in C)
 *
 * \note This type of lock halts interrupts, so ensure that no timing
 * functions are called while it is held. As a matter of fact, spend as
 * little time as possible with this lock held
 * \note If \a STACKED_LOCKS is set, this type of spinlock can be nested
 */
static inline void SHORTLOCK(struct sShortSpinlock *Lock) {
	extern int GetCPUNum(void);
	 int cpu = GetCPUNum() + 1;	// lock value: CPU number + 1 (0 = unlocked)

	// Save interrupt state and clear interrupts
	// (EFLAGS is popped into EAX, then CLI masks maskable interrupts)
	__ASM__ ("pushf;\n\tpop %%eax\n\tcli" : "=a"(IF));
	IF &= 0x200;	// AND out all but the interrupt flag

	// Re-entry: this CPU already owns the lock
	// NOTE(review): the body of this branch is elided in this excerpt
	if( Lock->Lock == cpu ) {
	// Wait for another CPU to release
	// CMPXCHG semantics:
	// If r/m32 == EAX, set ZF and set r/m32 = r32
	// Else, clear ZF and set EAX = r/m32
	// i.e. atomically swing Lock from 0 (free) to this CPU's value
	// NOTE(review): the asm output operand list and the surrounding
	// spin loop are elided in this excerpt
	__ASM__("lock cmpxchgl %2, (%3)"
		: "a"(0), "r"(cpu), "r"(&Lock->Lock)
	// Non-stacked variant: atomic test-and-set via XCHG (implicitly
	// locked); v receives the previous value of Lock->Lock
	__ASM__("xchgl %%eax, (%%edi)":"=a"(v):"a"(1),"D"(&Lock->Lock));
	#if LOCK_DISABLE_INTS
/**
 * \brief Release a short lock
 * \param Lock	Lock pointer
 */
static inline void SHORTREL(struct sShortSpinlock *Lock) {
	#if LOCK_DISABLE_INTS
	// Lock->IF can change anytime once Lock->Lock is zeroed,
	// so the saved interrupt state must be read before releasing
/**
 * \brief Halt the CPU (suspends execution until the next interrupt)
 */
#define HALT() __asm__ __volatile__ ("hlt")
/**
 * \brief Fire a magic breakpoint (bochs)
 *
 * "xchg %bx, %bx" is a no-op on real hardware but traps into the
 * Bochs debugger when magic breakpoints are enabled.
 */
#define MAGIC_BREAK() __asm__ __volatile__ ("xchg %bx, %bx")
// --- Fixed-width and machine-native integer types ---
// (i386 ILP32 model: int and long are both 32 bits, long long is 64)
typedef unsigned int	Uint;	// Unsigned machine native integer
typedef unsigned char	Uint8;
typedef unsigned short	Uint16;
typedef unsigned long	Uint32;
typedef unsigned long long	Uint64;
typedef signed int	Sint;	// Signed Machine Native integer
typedef signed char	Sint8;
typedef signed short	Sint16;
typedef signed long	Sint32;
typedef signed long long	Sint64;

// Physical addresses are 64-bit even though the virtual address space
// is 32-bit — NOTE(review): presumably to allow PAE physical memory;
// confirm against the paging code.
typedef Uint64	tPAddr;
typedef Uint32	tVAddr;
	// NOTE(review): these members appear to describe an interrupt stack
	// frame (general registers, vector/error code, then the CPU-pushed
	// iret frame); the enclosing struct's header is not in this excerpt.
	Uint	edi, esi, ebp, kesp;
	Uint	ebx, edx, ecx, eax;
	Uint	int_num, err_code;	// interrupt vector and error code
	Uint	eflags, esp, ss;	// pushed by the CPU on interrupt entry
	// NOTE(review): these members appear to alias a saved register frame
	// for syscall argument/return access (each comment names the slot's
	// underlying register); the enclosing struct's header and several
	// members are not in this excerpt.
	Uint	Resvd1[4];	// GS, FS, ES, DS
	Uint	Arg4, Arg5;	// EDI, ESI
	Uint	Resvd2[1];	// Kernel ESP
	Uint	RetHi;	// High 32 bits of ret
	Uint	Resvd3[5];	// Int, Err, Eip, CS, ...
	Uint	StackPointer;	// ESP
	Uint	Resvd4[1];	// SS
218 #endif // !defined(_ARCH_H_)