/*
 * Acess2 -- Kernel/arch/x86_64/lib.c
 * - Spinlocks, port IO, endianness and memory manipulation
 */
#include <acess.h>
#include <arch.h>

// === IMPORTS ===
extern int	GetCPUNum(void);
extern void	*Proc_GetCurThread(void);	// Needed when STACKED_LOCKS == 2

// === CODE ===
/**
 * \brief Determine if a short spinlock is locked
 * \param Lock	Lock pointer
 */
int IS_LOCKED(struct sShortSpinlock *Lock)
{
	return !!Lock->Lock;
}

/**
 * \brief Check if the current CPU has the lock
 * \param Lock	Lock pointer
 */
int CPU_HAS_LOCK(struct sShortSpinlock *Lock)
{
	#if STACKED_LOCKS == 1
	return Lock->Lock == GetCPUNum() + 1;
	#elif STACKED_LOCKS == 2
	return Lock->Lock == Proc_GetCurThread();
	#else
	return 0;	// Ownership is not tracked unless STACKED_LOCKS is enabled
	#endif
}
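
/*
 * Example: the check above is useful as a re-entry guard before taking a
 * lock that is not allowed to nest. Illustrative sketch only; the lock name
 * and the Panic() diagnostic are placeholders, not part of this file:
 * \code
 * if( CPU_HAS_LOCK( &glExample_Lock ) )
 * 	Panic("Example: glExample_Lock taken twice");
 * SHORTLOCK( &glExample_Lock );
 * \endcode
 */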

/**
 * \brief Acquire a Short Spinlock
 * \param Lock	Lock pointer
 *
 * This type of mutex should only be used for very short sections of code,
 * or in places where a Mutex_* would be overkill, such as appending an
 * element to a linked list (usually two assignment lines in C).
 *
 * \note This type of lock disables interrupts, so ensure that no timing
 * functions are called while it is held. In general, spend as little time
 * as possible with this lock held.
 * \note If \a STACKED_LOCKS is set, this type of spinlock can be nested.
 */
void SHORTLOCK(struct sShortSpinlock *Lock)
{
	// Pointer-sized, so the cmpxchgq result is not truncated when
	// STACKED_LOCKS == 2 stores a thread pointer in Lock->Lock
	Uint64	v = 1;
	#if LOCK_DISABLE_INTS
	 int	IF;
	#endif
	#if STACKED_LOCKS == 1
	 int	cpu = GetCPUNum() + 1;
	#elif STACKED_LOCKS == 2
	void	*thread = Proc_GetCurThread();
	#endif
	
	#if LOCK_DISABLE_INTS
	// Save interrupt state and clear interrupts
	// (pushf/pop grabs RFLAGS; only the IF bit is kept below)
	__ASM__ ("pushf;\n\tpop %%rax\n\tcli" : "=a"(IF));
	IF &= 0x200;	// AND out all but the interrupt flag
	#endif
	
	#if STACKED_LOCKS == 1
	// Already held by this CPU? Just increase the nesting depth
	if( Lock->Lock == cpu ) {
		Lock->Depth ++;
		return ;
	}
	#elif STACKED_LOCKS == 2
	// Already held by this thread? Just increase the nesting depth
	if( Lock->Lock == thread ) {
		Lock->Depth ++;
		return ;
	}
	#endif
	
	// Wait for another CPU to release
	while(v) {
		// CMPXCHG:
		//  If r/m == RAX, set ZF and set r/m = (source operand)
		//  Else, clear ZF and set RAX = r/m
		// The "memory" clobber doubles as a compiler barrier for the
		// critical section.
		#if STACKED_LOCKS == 1
		__ASM__("lock cmpxchgl %2, (%3)"
			: "=a"(v)
			: "a"(0), "r"(cpu), "r"(&Lock->Lock)
			: "memory"
			);
		#elif STACKED_LOCKS == 2
		// Lock->Lock holds a pointer here, so the exchange must be
		// the 64-bit form on x86_64 (cmpxchgq, not cmpxchgl)
		__ASM__("lock cmpxchgq %2, (%3)"
			: "=a"(v)
			: "a"((Uint64)0), "r"(thread), "r"(&Lock->Lock)
			: "memory"
			);
		#else
		__ASM__("xchgl %0, (%2)" : "=a"(v) : "a"(1), "D"(&Lock->Lock) : "memory");
		#endif
	}
	
	#if LOCK_DISABLE_INTS
	Lock->IF = IF;
	#endif
}
/**
 * \brief Release a short lock
 * \param Lock	Lock pointer
 */
void SHORTREL(struct sShortSpinlock *Lock)
{
	#if STACKED_LOCKS
	if( Lock->Depth ) {
		Lock->Depth --;
		return ;
	}
	#endif
	
	#if LOCK_DISABLE_INTS
	// Lock->IF can change anytime once Lock->Lock is zeroed
	if(Lock->IF) {
		Lock->Lock = 0;
		__ASM__ ("sti");
	}
	else {
		Lock->Lock = 0;
	}
	#else
	Lock->Lock = 0;
	#endif
}
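
/*
 * Example usage of the short spinlock pair above, covering the linked-list
 * case the doc comment mentions. Illustrative sketch only: the lock
 * `glExample_Lock`, list head `gpExample_List`, and node type
 * `tExampleNode` are hypothetical names, not part of this file:
 * \code
 * void Example_InsertNode(tExampleNode *Node)
 * {
 * 	SHORTLOCK( &glExample_Lock );
 * 	// Keep the critical section to the two assignments the lock is
 * 	// designed for
 * 	Node->Next = gpExample_List;
 * 	gpExample_List = Node;
 * 	SHORTREL( &glExample_Lock );
 * }
 * \endcode
 */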

void outb(Uint16 Port, Uint8 Data)
{
	__asm__ __volatile__ ("outb %%al, %%dx"::"d"(Port),"a"(Data));
}
void outw(Uint16 Port, Uint16 Data)
{
	__asm__ __volatile__ ("outw %%ax, %%dx"::"d"(Port),"a"(Data));
}
void outd(Uint16 Port, Uint32 Data)
{
	__asm__ __volatile__ ("outl %%eax, %%dx"::"d"(Port),"a"(Data));
}
Uint8 inb(Uint16 Port)
{
	Uint8	ret;
	__asm__ __volatile__ ("inb %%dx, %%al":"=a"(ret):"d"(Port));
	return ret;
}
Uint16 inw(Uint16 Port)
{
	Uint16	ret;
	__asm__ __volatile__ ("inw %%dx, %%ax":"=a"(ret):"d"(Port));
	return ret;
}
Uint32 ind(Uint16 Port)
{
	Uint32	ret;
	__asm__ __volatile__ ("inl %%dx, %%eax":"=a"(ret):"d"(Port));
	return ret;
}
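
/*
 * Example use of the port IO helpers above: reading the seconds register
 * of the CMOS RTC through the standard index/data ports 0x70/0x71. A
 * minimal sketch only; real code would also check the update-in-progress
 * flag before trusting the value:
 * \code
 * Uint8	seconds;
 * outb(0x70, 0x00);	// Select CMOS register 0x00 (RTC seconds)
 * seconds = inb(0x71);	// Read the selected register
 * \endcode
 */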

// === Endianness ===
Uint32 BigEndian32(Uint32 Value)
{
	Uint32	ret;
	ret = (Value >> 24);
	ret |= ((Value >> 16) & 0xFF) << 8;
	ret |= ((Value >>  8) & 0xFF) << 16;
	ret |= ((Value >>  0) & 0xFF) << 24;
	return ret;
}

Uint16 BigEndian16(Uint16 Value)
{
	return	(Value>>8)|(Value<<8);
}
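
/*
 * Worked example: on this little-endian target the two functions above are
 * byte swaps, so BigEndian32(0x12345678) == 0x78563412 and
 * BigEndian16(0x1234) == 0x3412. Applying either function twice returns
 * the original value, so the same routines convert in both directions.
 */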

// === Memory Manipulation ===
int memcmp(const void *__dest, const void *__src, size_t __count)
{
	if( ((tVAddr)__dest & 7) != ((tVAddr)__src & 7) ) {
		// Relative alignments differ, so compare byte by byte
		const Uint8	*src = __src, *dst = __dest;
		while(__count)
		{
			if( *src != *dst )
				return *dst - *src;
			src ++; dst ++; __count --;
		}
		return 0;
	}
	else {
		const Uint8	*src = __src;
		const Uint8	*dst = __dest;
		const Uint64	*src64, *dst64;
		
		// Compare bytes until both pointers are 8-byte aligned
		while( (tVAddr)src & 7 && __count ) {
			if( *src != *dst )
				return *dst - *src;
			dst ++; src ++; __count --;
		}

		src64 = (void*)src;
		dst64 = (void*)dst;

		// Compare in 8-byte units, then locate the differing byte
		while( __count >= 8 )
		{
			if( *src64 != *dst64 )
			{
				src = (void*)src64;
				dst = (void*)dst64;
				if(src[0] != dst[0])	return dst[0]-src[0];
				if(src[1] != dst[1])	return dst[1]-src[1];
				if(src[2] != dst[2])	return dst[2]-src[2];
				if(src[3] != dst[3])	return dst[3]-src[3];
				if(src[4] != dst[4])	return dst[4]-src[4];
				if(src[5] != dst[5])	return dst[5]-src[5];
				if(src[6] != dst[6])	return dst[6]-src[6];
				if(src[7] != dst[7])	return dst[7]-src[7];
				return -1;	// Unreachable: the qwords differed
			}
			__count -= 8;
			src64 ++;
			dst64 ++;
		}

		// Compare the remaining 0-7 bytes
		src = (void*)src64;
		dst = (void*)dst64;
		while( __count-- )
		{
			if(*dst != *src)	return *dst - *src;
			dst ++;
			src ++;
		}
	}
	return 0;
}
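
/*
 * Note on the sign convention above: as with the standard memcmp, the
 * result is the difference of the first differing bytes (first argument
 * minus second). For example, memcmp("acess", "acorn", 5) returns
 * 'e' - 'o', which is negative.
 */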

void *memcpy(void *__dest, const void *__src, size_t __count)
{
	if( ((tVAddr)__dest & 7) != ((tVAddr)__src & 7) ) {
		// Relative alignments differ; plain byte copy.
		// "+D"/"+S"/"+c" tell the compiler that rep movsb advances
		// RDI/RSI and consumes RCX, instead of silently clobbering them.
		void	*dst = __dest;
		const void	*src = __src;
		__asm__ __volatile__ ("rep movsb"
			: "+D"(dst), "+S"(src), "+c"(__count)
			:: "memory");
	}
	else {
		const Uint8	*src = __src;
		Uint8	*dst = __dest;
		size_t	qwords;
		// Copy bytes until both pointers are 8-byte aligned
		while( (tVAddr)src & 7 && __count ) {
			*dst++ = *src++;
			__count --;
		}

		// Bulk copy in 8-byte units; rep movsq leaves dst/src pointing
		// just past the copied region
		qwords = __count / 8;
		__asm__ __volatile__ ("rep movsq"
			: "+D"(dst), "+S"(src), "+c"(qwords)
			:: "memory");
		// Copy the remaining 0-7 bytes
		__count &= 7;
		while( __count-- )
			*dst++ = *src++;
	}
	return __dest;
}

void *memset(void *__dest, int __val, size_t __count)
{
	if( __val != 0 || ((tVAddr)__dest & 7) != 0 ) {
		// Non-zero fill (or unaligned destination): byte-at-a-time.
		// "+D"/"+c" mark the registers rep stosb modifies.
		void	*dst = __dest;
		__asm__ __volatile__ ("rep stosb"
			: "+D"(dst), "+c"(__count)
			: "a"(__val) : "memory");
	}
	else {
		Uint8	*dst = __dest;
		size_t	qwords = __count / 8;

		// Zero in 8-byte units, then mop up the remaining 0-7 bytes
		__asm__ __volatile__ ("rep stosq"
			: "+D"(dst), "+c"(qwords)
			: "a"((Uint64)0) : "memory");
		__count &= 7;
		while( __count-- )
			*dst++ = 0;
	}
	return __dest;
}

void *memsetd(void *__dest, Uint32 __val, size_t __count)
{
	void	*dst = __dest;
	// 32-bit fill; again mark RDI/RCX as read-write so the compiler
	// knows rep stosl changes them
	__asm__ __volatile__ ("rep stosl"
		: "+D"(dst), "+c"(__count)
		: "a"(__val) : "memory");
	return __dest;
}
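
/*
 * Example: memsetd suits 32-bit patterned fills that memset cannot express,
 * such as painting one scanline of a 32bpp framebuffer a solid colour. The
 * framebuffer pointer and width below are hypothetical:
 * \code
 * Uint32	*scanline = (Uint32*)gpExample_Framebuffer;
 * memsetd( scanline, 0x00FF0000, giExample_ScreenWidth );	// solid red
 * \endcode
 */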