2 * AcessOS Microkernel Version
14 #define DEBUG_TRACE_SWITCH 0
17 #define SWITCH_MAGIC 0xFFFACE55 // There is no code in this area
18 #define TIMER_DIVISOR 11931 //~100Hz
22 extern Uint GetEIP(); // start.asm
23 extern Uint32 gaInitPageDir[1024]; // start.asm
24 extern void Kernel_Stack_Top;
25 extern volatile int giThreadListLock;
28 extern int giTotalTickets;
29 extern int giNumActiveThreads;
30 extern tThread gThreadZero;
31 extern tThread *gActiveThreads;
32 extern tThread *gSleepingThreads;
33 extern tThread *gDeleteThreads;
34 extern tThread *Threads_GetNextToRun(int CPU);
35 extern void Threads_Dump();
36 extern tThread *Threads_CloneTCB(Uint *Err, Uint Flags);
39 void ArchThreads_Init();
40 tThread *Proc_GetCurThread();
41 void Proc_ChangeStack();
42 int Proc_Clone(Uint *Err, Uint Flags);
43 void Proc_Scheduler();
46 // --- Current State ---
// NOTE(review): two conflicting definitions of gCurrentThread follow; the
// surrounding #if USE_MP / #else / #endif lines are missing from this view.
// The array form is presumably the SMP build (one slot per CPU), the scalar
// the uniprocessor build — confirm against the full file.
48 tThread *gCurrentThread[MAX_CPUS] = {NULL};
50 tThread *gCurrentThread = NULL;
52 // --- Multiprocessing ---
// MP Floating Pointer structure located by ArchThreads_Init (NULL if absent)
54 tMPInfo *gMPTable = NULL;
// NOTE(review): initialising an array with a bare NULL is not valid C — this
// line likely sits under an unseen #if (PAE build?) and should read {NULL};
// verify against the full file before changing.
57 Uint32 *gPML4s[4] = NULL;
66 * \fn void ArchThreads_Init()
67 * \brief Starts the process scheduler
69 void ArchThreads_Init()
73 // -- Initialise Multiprocessing
74 // Find MP Floating Table
// Scan the Extended BIOS Data Area (physical 0x9FC00-0x9FFFF) on 16-byte
// boundaries for the MP Floating Pointer signature; a candidate only counts
// if its bytes sum to zero (the MP spec checksum).
76 for(pos = KERNEL_BASE|0x9FC00; pos < (KERNEL_BASE|0xA0000); pos += 16) {
77 if( *(Uint*)(pos) == MPTABLE_IDENT ) {
78 if(ByteSum( (void*)pos, sizeof(tMPInfo) ) != 0) continue;
79 gMPTable = (void*)pos;
// Fall back to scanning the BIOS ROM area (physical 0xF0000-0xFFFFF)
89 for(pos = KERNEL_BASE|0xF0000; pos < (KERNEL_BASE|0x100000); pos += 16) {
90 if( *(Uint*)(pos) == MPTABLE_IDENT ) {
91 if(ByteSum( (void*)pos, sizeof(tMPInfo) ) != 0) continue;
92 gMPTable = (void*)pos;
98 // If the MP Table Exists, parse it
// A found table is fatal for now: SMP bring-up is not implemented.
101 Panic("Uh oh... MP Table Parsing is unimplemented\n");
// Build one TSS and one GDT descriptor (slots 5..) per CPU so each CPU has
// its own ring-0 stack pointer for interrupt entry.
110 for(pos=0;pos<giNumCPUs;pos++)
115 gTSSs[pos].SS0 = 0x10;
116 gTSSs[pos].ESP0 = 0; // Set properly by scheduler
117 gGDT[5+pos].LimitLow = sizeof(tTSS);
118 gGDT[5+pos].LimitHi = 0;
119 gGDT[5+pos].Access = 0x89; // Type: present, 32-bit available TSS
120 gGDT[5+pos].Flags = 0x4;
// Split the TSS linear address across the three base fields of the descriptor
121 gGDT[5+pos].BaseLow = (Uint)&gTSSs[pos] & 0xFFFF;
122 gGDT[5+pos].BaseMid = (Uint)&gTSSs[pos] >> 16;
123 gGDT[5+pos].BaseHi = (Uint)&gTSSs[pos] >> 24;
// Load the task register with this CPU's TSS selector (0x28 = GDT slot 5)
// NOTE(review): loop runs on the boot CPU only — presumably each AP would
// need to execute its own ltr; confirm intended SMP behaviour.
126 for(pos=0;pos<giNumCPUs;pos++) {
128 __asm__ __volatile__ ("ltr %%ax"::"a"(0x28+pos*8));
// Install thread zero (the currently-running kernel context) as current
134 gCurrentThread[0] = &gThreadZero;
136 gCurrentThread = &gThreadZero;
140 gThreadZero.MemState.PDP[0] = 0;
141 gThreadZero.MemState.PDP[1] = 0;
142 gThreadZero.MemState.PDP[2] = 0;
// CR3 holds a physical address, hence the KERNEL_BASE subtraction
144 gThreadZero.MemState.CR3 = (Uint)gaInitPageDir - KERNEL_BASE;
147 // Set timer frequency
// Program PIT channel 0 for a periodic (rate generator) interrupt at
// ~100Hz (1193182 / TIMER_DIVISOR) to drive the scheduler tick.
148 outb(0x43, 0x34); // Set Channel 0, Low/High, Rate Generator
149 outb(0x40, TIMER_DIVISOR&0xFF); // Low Byte of Divisor
150 outb(0x40, (TIMER_DIVISOR>>8)&0xFF); // High Byte
152 // Create Per-Process Data Block
153 MM_Allocate(MM_PPD_CFG);
160 * \fn void Proc_Start()
161 * \brief Start process scheduler
// Enabling interrupts lets the PIT tick fire, which invokes Proc_Scheduler.
165 // Start Interrupts (and hence scheduler)
166 __asm__ __volatile__("sti");
170 * \fn tThread *Proc_GetCurThread()
171 * \brief Gets the current thread
// Returns the TCB of the thread running on this CPU (uniprocessor variant;
// the MP build presumably indexes gCurrentThread[] — lines not in view).
173 tThread *Proc_GetCurThread()
178 return gCurrentThread;
183 * \fn void Proc_ChangeStack()
184 * \brief Swaps the current stack for a new one (in the proper stack reigon)
// Copies the live kernel stack from the boot stack to a freshly allocated
// kernel stack, rewrites stack-internal pointers, then switches ESP/EBP.
186 void Proc_ChangeStack()
190 Uint curBase, newBase;
// Capture the current stack and frame pointers
192 __asm__ __volatile__ ("mov %%esp, %0":"=r"(esp));
193 __asm__ __volatile__ ("mov %%ebp, %0":"=r"(ebp));
198 newBase = MM_NewKStack();
// Allocation failure here is unrecoverable — no stack to fall back to
201 Panic("What the?? Unable to allocate space for initial kernel stack");
205 curBase = (Uint)&Kernel_Stack_Top;
207 LOG("curBase = 0x%x, newBase = 0x%x", curBase, newBase);
209 // Get ESP as a used size
// NOTE(review): the line converting esp to a byte count (esp = curBase - esp)
// is missing from this view; the memcpy below relies on it — confirm.
211 LOG("memcpy( %p, %p, 0x%x )", (void*)(newBase - esp), (void*)(curBase - esp), esp );
// Copy the used portion of the old stack to the same offset in the new one
213 memcpy( (void*)(newBase - esp), (void*)(curBase - esp), esp );
214 // Get ESP as an offset in the new stack
// Rebase EBP into the new stack (same depth from the top)
217 ebp = newBase - (curBase - ebp);
219 // Repair EBPs & Stack Addresses
220 // Catches arguments also, but may trash stack-address-like values
// Walk every word of the copied region; any value that looks like a pointer
// into the old stack is shifted by the distance between the two stacks.
// NOTE(review): oldEsp is declared/assigned on lines not in view — confirm.
221 for(tmpEbp = esp; tmpEbp < newBase; tmpEbp += 4)
223 if(oldEsp < *(Uint*)tmpEbp && *(Uint*)tmpEbp < curBase)
224 *(Uint*)tmpEbp += newBase - curBase;
// Record the new stack in the TCB, then switch to it atomically-ish
227 gCurrentThread->KernelStack = newBase;
229 __asm__ __volatile__ ("mov %0, %%esp"::"r"(esp));
230 __asm__ __volatile__ ("mov %0, %%ebp"::"r"(ebp));
234 * \fn int Proc_Clone(Uint *Err, Uint Flags)
235 * \brief Clone the current process
// fork()-style primitive: returns the child's TID in the parent and (via the
// SWITCH_MAGIC path below) 0 in the child after its first schedule-in.
// \param Err Destination for an error code on failure
// \param Flags CLONE_* bits; CLONE_VM gives the child its own address space
237 int Proc_Clone(Uint *Err, Uint Flags)
240 tThread *cur = Proc_GetCurThread();
// Snapshot the parent's stack/frame pointers for the child's saved state
243 __asm__ __volatile__ ("mov %%esp, %0": "=r"(esp));
244 __asm__ __volatile__ ("mov %%ebp, %0": "=r"(ebp));
246 newThread = Threads_CloneTCB(Err, Flags);
247 if(!newThread) return -1;
249 // Initialise Memory Space (New Addr space or kernel stack)
250 if(Flags & CLONE_VM) {
// New address space: copy-on-write clone of the page directory; the kernel
// stack is shared since the mappings are duplicated
251 newThread->MemState.CR3 = MM_Clone();
252 newThread->KernelStack = cur->KernelStack;
254 Uint tmpEbp, oldEsp = esp;
// Same address space: child needs its own kernel stack instead
257 newThread->MemState.CR3 = cur->MemState.CR3;
260 newThread->KernelStack = MM_NewKStack();
262 if(newThread->KernelStack == 0) {
267 // Get ESP as a used size
268 esp = cur->KernelStack - esp;
// Duplicate the used portion of the parent's kernel stack
270 memcpy( (void*)(newThread->KernelStack - esp), (void*)(cur->KernelStack - esp), esp );
271 // Get ESP as an offset in the new stack
272 esp = newThread->KernelStack - esp;
274 ebp = newThread->KernelStack - (cur->KernelStack - ebp);
276 // Repair EBPs & Stack Addresses
277 // Catches arguments also, but may trash stack-address-like values
// Shift anything that looks like a pointer into the parent stack so it
// points at the corresponding word of the child stack
278 for(tmpEbp = esp; tmpEbp < newThread->KernelStack; tmpEbp += 4)
280 if(oldEsp < *(Uint*)tmpEbp && *(Uint*)tmpEbp < cur->KernelStack)
281 *(Uint*)tmpEbp += newThread->KernelStack - cur->KernelStack;
285 // Save core machine state
286 newThread->SavedState.ESP = esp;
287 newThread->SavedState.EBP = ebp;
// NOTE(review): eip is presumably set by GetEIP() on a line not in view.
// When the child is scheduled, execution resumes there with EAX=SWITCH_MAGIC,
// so this branch is the child's return path.
289 if(eip == SWITCH_MAGIC) {
290 outb(0x20, 0x20); // ACK Timer and return as child
295 newThread->SavedState.EIP = eip;
297 // Lock list and add to active
298 Threads_AddActive(newThread);
// Parent's return value: the child's thread ID
300 return newThread->TID;
304 * \fn Uint Proc_MakeUserStack()
305 * \brief Creates a new user stack
307 Uint Proc_MakeUserStack()
310 Uint base = USER_STACK_TOP - USER_STACK_SZ;
312 // Check Prospective Space
313 for( i = USER_STACK_SZ >> 12; i--; )
314 if( MM_GetPhysAddr( base + (i<<12) ) != 0 )
317 if(i != -1) return 0;
319 // Allocate Stack - Allocate incrementally to clean up MM_Dump output
320 for( i = 0; i < USER_STACK_SZ/4069; i++ )
321 MM_Allocate( base + (i<<12) );
323 return base + USER_STACK_SZ;
328 * \fn void Proc_StartUser(Uint Entrypoint, Uint Base, int ArgC, char **ArgV, char **EnvP, int DataSize)
329 * \brief Starts a user task
// Builds a user-mode stack holding argv/envp data plus a fake IRET frame and
// a full register set, then drops to ring 3 via popa+iret. Does not return.
// NOTE(review): doc comment says "Uint Base" but the definition takes
// "Uint *Bases" — one of the two is stale; confirm against callers.
331 void Proc_StartUser(Uint Entrypoint, Uint *Bases, int ArgC, char **ArgV, char **EnvP, int DataSize)
333 Uint *stack = (void*)Proc_MakeUserStack();
338 LOG("stack = 0x%x", stack);
// Copy the argument/environment data block onto the top of the user stack
341 stack = (void*)( (Uint)stack - DataSize );
342 memcpy( stack, ArgV, DataSize );
344 // Adjust Arguments and environment
// The pointer arrays were relocated with the data, so rebase every entry by
// the distance the block moved
345 delta = (Uint)stack - (Uint)ArgV;
346 ArgV = (char**)stack;
347 for( i = 0; ArgV[i]; i++ ) ArgV[i] += delta;
350 for( i = 0; EnvP[i]; i++ ) EnvP[i] += delta;
352 // User Mode Segments
// Ring-3 selectors: SS = GDT entry 4 | RPL 3, CS = GDT entry 3 | RPL 3
353 ss = 0x23; cs = 0x1B;
// Push main()-style arguments (right to left)
356 *--stack = (Uint)EnvP;
357 *--stack = (Uint)ArgV;
358 *--stack = (Uint)ArgC;
361 *--stack = 0; // Return Address
362 delta = (Uint)stack; // Reuse delta to save SP
// Fake inter-privilege IRET frame: SS, ESP, EFLAGS, CS, EIP (pop order)
364 *--stack = ss; //Stack Segment
365 *--stack = delta; //Stack Pointer
366 *--stack = 0x0202; //EFLAGS (Reserved bit (0x2) and IF (0x200))
367 *--stack = cs; //Code Segment
368 *--stack = Entrypoint; //EIP
// Recognisable junk values for the POPA-restored registers (debug aid)
370 *--stack = 0xAAAAAAAA; // eax
371 *--stack = 0xCCCCCCCC; // ecx
372 *--stack = 0xDDDDDDDD; // edx
373 *--stack = 0xBBBBBBBB; // ebx
374 *--stack = 0xD1D1D1D1; // edi
375 *--stack = 0x54545454; // esp - NOT POPED
376 *--stack = 0x51515151; // esi
377 *--stack = 0xB4B4B4B4; // ebp
// Point ESP at the prepared frame and launch into user mode; the popa/segment
// loads presumably sit on the asm lines not in view before the iret.
384 __asm__ __volatile__ (
385 "mov %%eax,%%esp;\n\t" // Set stack pointer
391 "iret;\n\t" : : "a" (stack));
396 * \fn int Proc_Demote(Uint *Err, int Dest, tRegs *Regs)
397 * \brief Demotes a process to a lower permission level
398 * \param Err Pointer to user's errno
// \param Dest Destination privilege ring (0-3); must be numerically greater
//             than the caller's current ring (checked on lines not in view)
// \param Regs Saved interrupt register frame to rewrite in place
400 int Proc_Demote(Uint *Err, int Dest, tRegs *Regs)
// Current privilege level is the RPL bits of the saved CS
402 int cpl = Regs->cs & 3;
// Reject rings outside 0-3
404 if(Dest > 3 || Dest < 0) {
415 // Change the Segment Registers
// NOTE(review): this formula assumes per-ring GDT code/data pairs (ring N
// data selector = ((N+1)<<4)|N, code 8 below it). For Dest=3 it yields
// SS=0x43/CS=0x3B, which disagrees with the 0x23/0x1B ring-3 selectors used
// by Proc_StartUser — confirm the GDT layout before relying on this.
416 Regs->cs = (((Dest+1)<<4) | Dest) - 8;
417 Regs->ss = ((Dest+1)<<4) | Dest;
418 // Check if the GP Segs are GDT, then change them
// Bit 2 of a selector set means LDT; only rewrite GDT-based data segments
419 if(!(Regs->ds & 4)) Regs->ds = ((Dest+1)<<4) | Dest;
420 if(!(Regs->es & 4)) Regs->es = ((Dest+1)<<4) | Dest;
421 if(!(Regs->fs & 4)) Regs->fs = ((Dest+1)<<4) | Dest;
422 if(!(Regs->gs & 4)) Regs->gs = ((Dest+1)<<4) | Dest;
428 * \fn void Proc_Scheduler(int CPU)
429 * \brief Swap current thread and clears dead threads
// Called from the timer interrupt. Reaps the delete queue, decrements the
// current thread's quantum, and context-switches when the quantum expires.
// \param CPU Index of the CPU running the scheduler
431 void Proc_Scheduler(int CPU)
436 // If the spinlock is set, let it complete
// Someone is mutating the thread lists; punt to the next tick rather than
// deadlock inside an interrupt handler.
437 if(giThreadListLock) return;
439 // Clear Delete Queue
440 while(gDeleteThreads)
442 thread = gDeleteThreads->Next;
// NOTE(review): condition frees when IsLocked is TRUE, but the comment says
// "only free if unused" — one of the two looks inverted; confirm the
// IsLocked semantics before touching this.
443 if(gDeleteThreads->IsLocked) { // Only free if structure is unused
444 gDeleteThreads->Status = THREAD_STAT_NULL;
445 free( gDeleteThreads );
447 gDeleteThreads = thread;
450 // Check if there is any tasks running
451 if(giNumActiveThreads == 0) {
452 Log("No Active threads, sleeping");
// Halt until the next interrupt wakes us
453 __asm__ __volatile__ ("hlt");
457 // Reduce remaining quantum and continue timeslice if non-zero
458 if(gCurrentThread->Remaining--) return;
459 // Reset quantum for next call
460 gCurrentThread->Remaining = gCurrentThread->Quantum;
// Capture the outgoing thread's stack/frame pointers; EIP is presumably
// captured by GetEIP() on a line not in view.
463 __asm__ __volatile__ ("mov %%esp, %0":"=r"(esp));
464 __asm__ __volatile__ ("mov %%ebp, %0":"=r"(ebp));
// If we got here with EAX==SWITCH_MAGIC, we are the freshly resumed thread
// returning through the saved EIP — nothing more to do.
466 if(eip == SWITCH_MAGIC) return; // Check if a switch happened
468 // Save machine state
469 gCurrentThread->SavedState.ESP = esp;
470 gCurrentThread->SavedState.EBP = ebp;
471 gCurrentThread->SavedState.EIP = eip;
// Pick the next runnable thread for this CPU
474 thread = Threads_GetNextToRun(CPU);
478 Warning("Hmm... Threads_GetNextToRun returned NULL, I don't think this should happen.\n");
482 #if DEBUG_TRACE_SWITCH
483 Log("Switching to task %i, CR3 = 0x%x, EIP = %p",
485 thread->MemState.CR3,
486 thread->SavedState.EIP
490 // Set current thread
491 gCurrentThread = thread;
493 // Update Kernel Stack pointer
// TSS.ESP0 is the ring-0 stack the CPU loads on the next user->kernel entry
494 gTSSs[CPU].ESP0 = thread->KernelStack;
// Switch address space, then jump into the saved context with
// EAX=SWITCH_MAGIC so the resumed code can detect the switch.
497 __asm__ __volatile__ ("mov %0, %%cr3"::"a"(gCurrentThread->MemState.CR3));
499 __asm__ __volatile__ (
503 "a"(SWITCH_MAGIC), "b"(gCurrentThread->SavedState.ESP),
504 "d"(gCurrentThread->SavedState.EBP), "c"(gCurrentThread->SavedState.EIP));
// Unreachable: the jump above never returns here
505 for(;;); // Shouldn't reach here