2 * AcessOS Microkernel Version
14 #define DEBUG_TRACE_SWITCH 0 // Set non-zero to Log() every context switch
// SWITCH_MAGIC is placed in EAX by the scheduler's hand-off jump; code that
// saved its EIP via GetEIP() compares against it to detect "resumed after switch".
17 #define SWITCH_MAGIC 0xFFFACE55 // There is no code in this area
// PIT divisor: 1193182 Hz / 11931 ~= 100 Hz timer tick
18 #define TIMER_DIVISOR 11931 //~100Hz
// === IMPORTS (assembly / thread manager) ===
22 extern Uint GetEIP(); // start.asm
23 extern Uint32 gaInitPageDir[1024]; // start.asm
24 extern void Kernel_Stack_Top; // Linker/asm symbol: top of the boot kernel stack
25 extern volatile int giThreadListLock; // Spinlock guarding the thread lists
28 extern int giTotalTickets;
29 extern int giNumActiveThreads;
30 extern tThread gThreadZero; // Template/bootstrap TCB (thread 0)
31 extern tThread *gActiveThreads;
32 extern tThread *gSleepingThreads;
33 extern tThread *gDeleteThreads; // Queue of threads awaiting reaping
34 extern tThread *Threads_GetNextToRun(int CPU);
35 extern void Threads_Dump();
36 extern tThread *Threads_CloneTCB(Uint *Err, Uint Flags);
// === PROTOTYPES ===
40 void ArchThreads_Init();
41 tThread *Proc_GetCurThread();
42 void Proc_ChangeStack();
43 int Proc_Clone(Uint *Err, Uint Flags);
44 void Proc_Scheduler();
47 // --- Current State ---
// NOTE(review): the two gCurrentThread definitions below are mutually exclusive
// alternatives — presumably selected by an #if USE_MP / #else that is elided
// from this excerpt; confirm against the full file.
49 tThread *gCurrentThread[MAX_CPUS] = {NULL}; // Per-CPU current thread (MP build)
51 tThread *gCurrentThread = NULL; // Single current thread (UP build)
53 // --- Multiprocessing ---
55 tMPInfo *gMPFloatPtr = NULL; // MP Floating Pointer structure; NULL if none found
// NOTE(review): scalar NULL initializer on an array of pointers — standard C
// requires a braced initializer ({NULL}); verify this compiles as intended.
58 Uint32 *gPML4s[4] = NULL;
62 // --- Error Recovery ---
63 char gaDoubleFaultStack[1024]; // Dedicated stack for the double-fault handler
64 tTSS gDoubleFault_TSS = {
// NOTE(review): &stack[1023] is the last byte, not one-past-the-end — the
// ring-0 stack top is misaligned by one byte; confirm whether [1024] was meant.
65 .ESP0 = (Uint)&gaDoubleFaultStack[1023],
72 * \fn void ArchThreads_Init()
73 * \brief Starts the process scheduler
// Performs one-time scheduler bring-up: scans for the Intel MP Floating
// Pointer structure, initialises the double-fault TSS (GDT entry 5) and the
// per-CPU TSSs (GDT entries 6+), installs thread zero as the current thread,
// programs the PIT to ~100 Hz, and allocates the per-process data block.
// (Several lines — local declarations, #if/#endif directives and closing
// braces — are elided from this excerpt.)
75 void ArchThreads_Init()
82 // -- Initialise Multiprocessing
83 // Find MP Floating Table
84 // - EBDA/Last 1Kib (640KiB)
// Scan 16-byte-aligned addresses below 640KiB for the MP signature; a hit
// is only accepted if the structure's bytes sum to zero (valid checksum).
85 for(pos = KERNEL_BASE|0x9F000; pos < (KERNEL_BASE|0xA0000); pos += 16) {
86 if( *(Uint*)(pos) == MPPTR_IDENT ) {
87 Log("Possible %p", pos);
88 if( ByteSum((void*)pos, sizeof(tMPInfo)) != 0 ) continue; // bad checksum, keep scanning
89 gMPFloatPtr = (void*)pos;
93 // - Last KiB (512KiB base mem)
// Same scan at the top of 512KiB base memory (for machines with less RAM).
95 for(pos = KERNEL_BASE|0x7F000; pos < (KERNEL_BASE|0x80000); pos += 16) {
96 if( *(Uint*)(pos) == MPPTR_IDENT ) {
97 Log("Possible %p", pos);
98 if( ByteSum((void*)pos, sizeof(tMPInfo)) != 0 ) continue;
99 gMPFloatPtr = (void*)pos;
// Same scan over the BIOS ROM area (0xE0000-0xFFFFF).
106 for(pos = KERNEL_BASE|0xE0000; pos < (KERNEL_BASE|0x100000); pos += 16) {
107 if( *(Uint*)(pos) == MPPTR_IDENT ) {
108 Log("Possible %p", pos);
109 if( ByteSum((void*)pos, sizeof(tMPInfo)) != 0 ) continue;
110 gMPFloatPtr = (void*)pos;
116 // If the MP Table Exists, parse it
// Diagnostic dump of the floating pointer structure's fields.
119 Log("gMPFloatPtr = %p", gMPFloatPtr);
120 Log("*gMPFloatPtr = {");
121 Log("\t.Sig = 0x%08x", gMPFloatPtr->Sig);
122 Log("\t.MPConfig = 0x%08x", gMPFloatPtr->MPConfig);
123 Log("\t.Length = 0x%02x", gMPFloatPtr->Length);
124 Log("\t.Version = 0x%02x", gMPFloatPtr->Version);
125 Log("\t.Checksum = 0x%02x", gMPFloatPtr->Checksum);
126 Log("\t.Features = [0x%02x,0x%02x,0x%02x,0x%02x,0x%02x]",
127 gMPFloatPtr->Features[0], gMPFloatPtr->Features[1],
128 gMPFloatPtr->Features[2], gMPFloatPtr->Features[3],
129 gMPFloatPtr->Features[4]
// MPConfig holds the physical address of the MP configuration table;
// it is reachable through the kernel's identity window at KERNEL_BASE.
133 mptable = (void*)( KERNEL_BASE|gMPFloatPtr->MPConfig );
134 Log("mptable = %p", mptable);
136 Log("\t.Sig = 0x%08x", mptable->Sig);
137 Log("\t.BaseTableLength = 0x%04x", mptable->BaseTableLength);
138 Log("\t.SpecRev = 0x%02x", mptable->SpecRev);
139 Log("\t.Checksum = 0x%02x", mptable->Checksum);
140 Log("\t.OEMID = '%8c'", mptable->OemID);
141 Log("\t.ProductID = '%8c'", mptable->ProductID);
// Actual SMP bring-up is not implemented — stop hard if a table was found.
144 Panic("Uh oh... MP Table Parsing is unimplemented\n");
147 Log("No MP Table was found, assuming uniprocessor\n");
156 // Initialise Double Fault TSS
// GDT entry 5 describes gDoubleFault_TSS (32-bit available TSS, present).
158 gGDT[5].LimitLow = sizeof(tTSS);
160 gGDT[5].Access = 0x89; // Type: P=1, DPL=0, 32-bit available TSS
163 gGDT[5].BaseLow = (Uint)&gDoubleFault_TSS & 0xFFFF;
164 gGDT[5].BaseMid = (Uint)&gDoubleFault_TSS >> 16;
165 gGDT[5].BaseHi = (Uint)&gDoubleFault_TSS >> 24;
168 // Initialise Normal TSS(s)
// One TSS per CPU in GDT entries 6..6+giNumCPUs-1; only SS0 is fixed here,
// ESP0 is filled in by the scheduler on each switch.
169 for(pos=0;pos<giNumCPUs;pos++)
174 gTSSs[pos].SS0 = 0x10; // kernel data segment for ring-0 stack
175 gTSSs[pos].ESP0 = 0; // Set properly by scheduler
176 gGDT[6+pos].BaseLow = ((Uint)(&gTSSs[pos])) & 0xFFFF;
177 gGDT[6+pos].BaseMid = ((Uint)(&gTSSs[pos])) >> 16;
178 gGDT[6+pos].BaseHi = ((Uint)(&gTSSs[pos])) >> 24;
180 gGDT[6+pos].LimitLow = sizeof(tTSS);
181 gGDT[6+pos].LimitHi = 0;
182 gGDT[6+pos].Access = 0x89; // Type: P=1, DPL=0, 32-bit available TSS
183 gGDT[6+pos].Flags = 0x4;
// Load the task register; 0x30 + pos*8 selects GDT entry 6+pos.
// NOTE(review): this loop runs entirely on the boot CPU — presumably only
// the pos==0 iteration's LTR takes lasting effect here; confirm intent.
187 for(pos=0;pos<giNumCPUs;pos++) {
189 __asm__ __volatile__ ("ltr %%ax"::"a"(0x30+pos*8));
// Install thread zero as the bootstrap current thread.
// (The two assignments below are #if USE_MP / #else alternatives — the
// directives are elided from this excerpt.)
195 gCurrentThread[0] = &gThreadZero;
197 gCurrentThread = &gThreadZero;
201 gThreadZero.MemState.PDP[0] = 0;
202 gThreadZero.MemState.PDP[1] = 0;
203 gThreadZero.MemState.PDP[2] = 0;
// CR3 takes the PHYSICAL address of the initial page directory, hence
// the subtraction of the kernel's virtual base.
205 gThreadZero.MemState.CR3 = (Uint)gaInitPageDir - KERNEL_BASE;
208 // Set timer frequency
209 outb(0x43, 0x34); // Set Channel 0, Low/High, Rate Generator
210 outb(0x40, TIMER_DIVISOR&0xFF); // Low Byte of Divisor
211 outb(0x40, (TIMER_DIVISOR>>8)&0xFF); // High Byte
213 // Create Per-Process Data Block
214 MM_Allocate(MM_PPD_CFG);
221 * \fn void Proc_Start()
222 * \brief Start process scheduler
// Enables interrupts; from this point the PIT IRQ drives Proc_Scheduler.
226 // Start Interrupts (and hence scheduler)
227 __asm__ __volatile__("sti");
231 * \fn tThread *Proc_GetCurThread()
232 * \brief Gets the current thread
234 tThread *Proc_GetCurThread()
// UP build: return the single current-thread pointer.
// (An MP variant indexing gCurrentThread[] by CPU is presumably in the
// elided #if USE_MP branch — confirm against the full file.)
239 return gCurrentThread;
244 * \fn void Proc_ChangeStack()
245 * \brief Swaps the current stack for a new one (in the proper stack reigon)
// Migrates the currently-executing kernel stack off the boot stack
// (Kernel_Stack_Top) onto a freshly allocated kernel stack: copies the
// in-use portion, rebases ESP/EBP, patches saved frame pointers, then
// switches ESP/EBP to the new stack and continues executing.
247 void Proc_ChangeStack()
251 Uint curBase, newBase;
// Capture the live stack and frame pointers.
253 __asm__ __volatile__ ("mov %%esp, %0":"=r"(esp));
254 __asm__ __volatile__ ("mov %%ebp, %0":"=r"(ebp));
259 newBase = MM_NewKStack();
// (The failure check `if(newBase == 0)` is in an elided line.)
262 Panic("What the?? Unable to allocate space for initial kernel stack");
266 curBase = (Uint)&Kernel_Stack_Top;
268 LOG("curBase = 0x%x, newBase = 0x%x", curBase, newBase);
270 // Get ESP as a used size
// NOTE: an elided line converts `esp` to a byte count (curBase - esp);
// from here on `esp` is the number of in-use bytes below the stack top,
// and `oldEsp`/`tmpEbp` come from elided declarations.
272 LOG("memcpy( %p, %p, 0x%x )", (void*)(newBase - esp), (void*)(curBase - esp), esp );
// Copy the in-use region to the same offset from the new stack's top.
274 memcpy( (void*)(newBase - esp), (void*)(curBase - esp), esp );
275 // Get ESP as an offset in the new stack
278 ebp = newBase - (curBase - ebp); // rebase EBP into the new stack
280 // Repair EBPs & Stack Addresses
281 // Catches arguments also, but may trash stack-address-like values
// Walk every word of the copied region; any value that looks like a
// pointer into the old stack (oldEsp..curBase) is shifted to the new one.
282 for(tmpEbp = esp; tmpEbp < newBase; tmpEbp += 4)
284 if(oldEsp < *(Uint*)tmpEbp && *(Uint*)tmpEbp < curBase)
285 *(Uint*)tmpEbp += newBase - curBase;
// Record the new stack in the TCB and switch onto it.
288 Proc_GetCurThread()->KernelStack = newBase;
290 __asm__ __volatile__ ("mov %0, %%esp"::"r"(esp));
291 __asm__ __volatile__ ("mov %0, %%ebp"::"r"(ebp));
295 * \fn int Proc_Clone(Uint *Err, Uint Flags)
296 * \brief Clone the current process
// Returns the child's TID to the parent, or -1 if the TCB clone fails.
// When the child is first scheduled it resumes inside this function at the
// saved EIP with EAX == SWITCH_MAGIC, ACKs the timer and returns from the
// elided child path (presumably returning 0 — confirm against full file).
298 int Proc_Clone(Uint *Err, Uint Flags)
301 tThread *cur = Proc_GetCurThread();
// Capture the caller's stack and frame pointers.
304 __asm__ __volatile__ ("mov %%esp, %0": "=r"(esp));
305 __asm__ __volatile__ ("mov %%ebp, %0": "=r"(ebp));
307 newThread = Threads_CloneTCB(Err, Flags);
308 if(!newThread) return -1;
310 // Initialise Memory Space (New Addr space or kernel stack)
// CLONE_VM set: clone the address space, share the kernel stack.
// CLONE_VM clear: share the address space (same CR3) but give the child
// its own kernel stack, copied and pointer-fixed from the parent's.
311 if(Flags & CLONE_VM) {
312 newThread->MemState.CR3 = MM_Clone();
313 newThread->KernelStack = cur->KernelStack;
315 Uint tmpEbp, oldEsp = esp;
318 newThread->MemState.CR3 = cur->MemState.CR3;
321 newThread->KernelStack = MM_NewKStack();
// (Elided lines handle the allocation-failure cleanup and return.)
323 if(newThread->KernelStack == 0) {
328 // Get ESP as a used size
329 esp = cur->KernelStack - esp; // bytes in use on the parent's stack
331 memcpy( (void*)(newThread->KernelStack - esp), (void*)(cur->KernelStack - esp), esp );
332 // Get ESP as an offset in the new stack
333 esp = newThread->KernelStack - esp; // child's ESP within the new stack
335 ebp = newThread->KernelStack - (cur->KernelStack - ebp); // rebase EBP likewise
337 // Repair EBPs & Stack Addresses
338 // Catches arguments also, but may trash stack-address-like values
// Rebase any copied word that looks like a pointer into the parent stack.
339 for(tmpEbp = esp; tmpEbp < newThread->KernelStack; tmpEbp += 4)
341 if(oldEsp < *(Uint*)tmpEbp && *(Uint*)tmpEbp < cur->KernelStack)
342 *(Uint*)tmpEbp += newThread->KernelStack - cur->KernelStack;
346 // Save core machine state
347 newThread->SavedState.ESP = esp;
348 newThread->SavedState.EBP = ebp;
// `eip` is set by an elided GetEIP() call just above; it equals
// SWITCH_MAGIC only when we are resuming here as the scheduled child.
350 if(eip == SWITCH_MAGIC) {
351 outb(0x20, 0x20); // ACK Timer and return as child
356 newThread->SavedState.EIP = eip;
358 // Lock list and add to active
359 Threads_AddActive(newThread);
361 return newThread->TID;
365 * \fn int Proc_SpawnWorker()
366 * \brief Spawns a new worker thread
// Creates a kernel worker thread from the thread-zero template, running on
// a dedicated worker stack in PID 0's address space. Like Proc_Clone, the
// new thread resumes here with EAX == SWITCH_MAGIC when first scheduled.
368 int Proc_SpawnWorker()
373 cur = Proc_GetCurThread();
// Allocate the new TCB and initialise it from the thread-zero template.
// (The NULL check's surrounding braces/return are partly elided.)
376 new = malloc( sizeof(tThread) );
378 Warning("Proc_SpawnWorker - Out of heap space!\n");
381 memcpy(new, &gThreadZero, sizeof(tThread));
383 new->TID = giNextTID++;
384 // Create a new worker stack (in PID0's address space)
385 // The stack is relocated by this code
386 new->KernelStack = MM_NewWorkerStack();
388 // Get ESP and EBP based in the new stack
389 __asm__ __volatile__ ("mov %%esp, %0": "=r"(esp));
390 __asm__ __volatile__ ("mov %%ebp, %0": "=r"(ebp));
391 esp = new->KernelStack - (cur->KernelStack - esp); // rebase ESP into the worker stack
392 ebp = new->KernelStack - (cur->KernelStack - ebp); // rebase EBP into the worker stack
394 // Save core machine state
395 new->SavedState.ESP = esp;
396 new->SavedState.EBP = ebp;
// `eip` comes from an elided GetEIP() call; SWITCH_MAGIC means we are now
// executing as the newly scheduled worker.
398 if(eip == SWITCH_MAGIC) {
399 outb(0x20, 0x20); // ACK Timer and return as child
404 new->SavedState.EIP = eip;
406 new->Status = THREAD_STAT_ACTIVE;
407 Threads_AddActive( new );
413 * \fn Uint Proc_MakeUserStack()
414 * \brief Creates a new user stack
// Maps USER_STACK_SZ bytes of user stack ending at USER_STACK_TOP and
// returns the stack top, or 0 if any page in the range is already mapped.
// (`i`'s declaration and the loop's `break` live in elided lines.)
416 Uint Proc_MakeUserStack()
419 Uint base = USER_STACK_TOP - USER_STACK_SZ;
421 // Check Prospective Space
// Walk the range page by page (4 KiB pages); `i` only reaches -1 if
// every page was unmapped — otherwise the elided break leaves i >= 0.
422 for( i = USER_STACK_SZ >> 12; i--; )
423 if( MM_GetPhysAddr( base + (i<<12) ) != 0 )
426 if(i != -1) return 0;
428 // Allocate Stack - Allocate incrementally to clean up MM_Dump output
// BUGFIX: divisor was 4069, a typo for the 4 KiB page size (4096) used by
// the <<12 arithmetic everywhere else; the wrong value could allocate a
// page past USER_STACK_TOP for large USER_STACK_SZ values.
429 for( i = 0; i < USER_STACK_SZ/4096; i++ )
430 MM_Allocate( base + (i<<12) );
432 return base + USER_STACK_SZ;
437 * \fn void Proc_StartUser(Uint Entrypoint, Uint *Bases, int ArgC, char **ArgV, char **EnvP, int DataSize)
438 * \brief Starts a user task
// Builds a fresh user stack carrying the argv/envp blob, constructs a
// POPAD register block plus an IRET frame, and drops to ring 3 at
// Entrypoint. Does not return.
440 void Proc_StartUser(Uint Entrypoint, Uint *Bases, int ArgC, char **ArgV, char **EnvP, int DataSize)
442 Uint *stack = (void*)Proc_MakeUserStack();
447 LOG("stack = 0x%x", stack);
// Copy the DataSize-byte argument/environment blob (which starts at ArgV)
// to the top of the new user stack.
450 stack = (void*)( (Uint)stack - DataSize );
451 memcpy( stack, ArgV, DataSize );
453 // Adjust Arguments and environment
// The blob moved by `delta` bytes — rebase every argv (and envp) pointer.
454 delta = (Uint)stack - (Uint)ArgV;
455 ArgV = (char**)stack;
456 for( i = 0; ArgV[i]; i++ ) ArgV[i] += delta;
// (The EnvP NULL guard / reassignment is in elided lines.)
459 for( i = 0; EnvP[i]; i++ ) EnvP[i] += delta;
461 // User Mode Segments
462 ss = 0x23; cs = 0x1B; // ring-3 data and code selectors
// main()-style arguments, pushed right to left, plus a fake return address.
465 *--stack = (Uint)EnvP;
466 *--stack = (Uint)ArgV;
467 *--stack = (Uint)ArgC;
470 *--stack = 0; // Return Address
471 delta = (Uint)stack; // Reuse delta to save SP
// Inter-privilege IRET frame: SS:ESP, EFLAGS, CS:EIP.
473 *--stack = ss; //Stack Segment
474 *--stack = delta; //Stack Pointer
475 *--stack = 0x0202; //EFLAGS (reserved bit 1 set, IF = 0x200)
476 *--stack = cs; //Code Segment
477 *--stack = Entrypoint; //EIP
// Dummy register values for the restore (presumably a POPAD in the elided
// asm lines). NOTE(review): the push order below (edi, esp, esi, ebp at
// the bottom) does not match PUSHAD layout (esp, ebp, esi, edi) — the
// labels or order may be wrong, though the values are only debug markers.
479 *--stack = 0xAAAAAAAA; // eax
480 *--stack = 0xCCCCCCCC; // ecx
481 *--stack = 0xDDDDDDDD; // edx
482 *--stack = 0xBBBBBBBB; // ebx
483 *--stack = 0xD1D1D1D1; // edi
484 *--stack = 0x54545454; // esp - NOT POPED
485 *--stack = 0x51515151; // esi
486 *--stack = 0xB4B4B4B4; // ebp
// Point ESP at the prepared frame and IRET into user mode (intermediate
// asm lines — segment loads / popad — are elided from this excerpt).
493 __asm__ __volatile__ (
494 "mov %%eax,%%esp;\n\t" // Set stack pointer
500 "iret;\n\t" : : "a" (stack));
505 * \fn int Proc_Demote(Uint *Err, int Dest, tRegs *Regs)
506 * \brief Demotes a process to a lower permission level
507 * \param Err Pointer to user's errno
508 * \param Dest New Permission Level
509 * \param Regs Pointer to user's register structure
511 int Proc_Demote(Uint *Err, int Dest, tRegs *Regs)
513 int cpl = Regs->cs & 3; // current privilege = RPL bits of saved CS (used in elided checks)
// Validate the requested ring (0-3); the error-return body is elided.
515 if(Dest > 3 || Dest < 0) {
526 // Change the Segment Registers
// Selector encoding: ((Dest+1)<<4)|Dest picks the per-ring stack/data
// selector with RPL == Dest, and the code selector sits one GDT entry
// (8 bytes) below it. NOTE(review): this assumes a specific per-ring GDT
// layout — confirm against the gGDT definitions.
527 Regs->cs = (((Dest+1)<<4) | Dest) - 8;
528 Regs->ss = ((Dest+1)<<4) | Dest;
529 // Check if the GP Segs are GDT, then change them
// Bit 2 of a selector is the Table Indicator; 0 = GDT, 1 = LDT (left alone).
530 if(!(Regs->ds & 4)) Regs->ds = ((Dest+1)<<4) | Dest;
531 if(!(Regs->es & 4)) Regs->es = ((Dest+1)<<4) | Dest;
532 if(!(Regs->fs & 4)) Regs->fs = ((Dest+1)<<4) | Dest;
533 if(!(Regs->gs & 4)) Regs->gs = ((Dest+1)<<4) | Dest;
539 * \fn void Proc_Scheduler(int CPU)
540 * \brief Swap current thread and clears dead threads
// Timer-IRQ-driven scheduler: reaps the delete queue, halts when idle,
// enforces the quantum, saves the outgoing thread's ESP/EBP/EIP, and jumps
// into the next thread's saved state with EAX == SWITCH_MAGIC so the
// resumed code (here, Proc_Clone or Proc_SpawnWorker) can detect the switch.
542 void Proc_Scheduler(int CPU)
547 // If the spinlock is set, let it complete
548 if(giThreadListLock) return;
550 // Clear Delete Queue
551 while(gDeleteThreads)
553 thread = gDeleteThreads->Next;
// NOTE(review): the condition frees when IsLocked is TRUE, which appears
// to contradict the comment ("free if unused") — verify the intended
// sense of IsLocked before relying on this.
554 if(gDeleteThreads->IsLocked) { // Only free if structure is unused
555 gDeleteThreads->Status = THREAD_STAT_NULL;
556 free( gDeleteThreads );
558 gDeleteThreads = thread;
561 // Check if there is any tasks running
562 if(giNumActiveThreads == 0) {
563 Log("No Active threads, sleeping");
564 __asm__ __volatile__ ("hlt"); // idle until the next interrupt
568 // Get current thread
// (The two assignments below are #if USE_MP / #else alternatives; the
// directives are elided. Note the UP branch uses a different local name.)
570 thread = gCurrentThread[CPU];
572 curThread = gCurrentThread;
575 // Reduce remaining quantum and continue timeslice if non-zero
576 if(thread->Remaining--) return;
577 // Reset quantum for next call
578 thread->Remaining = thread->Quantum;
// Save outgoing context; `eip` is set by an elided GetEIP() call.
581 __asm__ __volatile__ ("mov %%esp, %0":"=r"(esp));
582 __asm__ __volatile__ ("mov %%ebp, %0":"=r"(ebp));
584 if(eip == SWITCH_MAGIC) return; // Check if a switch happened
586 // Save machine state
587 thread->SavedState.ESP = esp;
588 thread->SavedState.EBP = ebp;
589 thread->SavedState.EIP = eip;
// Select the next thread to run.
592 thread = Threads_GetNextToRun(CPU);
596 Warning("Hmm... Threads_GetNextToRun returned NULL, I don't think this should happen.\n");
600 #if DEBUG_TRACE_SWITCH
601 Log("Switching to task %i, CR3 = 0x%x, EIP = %p",
603 thread->MemState.CR3,
604 thread->SavedState.EIP
608 // Set current thread
// (Again #if USE_MP / #else alternatives with elided directives.)
610 gCurrentThread[CPU] = thread;
612 gCurrentThread = thread;
615 // Update Kernel Stack pointer
616 gTSSs[CPU].ESP0 = thread->KernelStack; // ring-0 stack for the next interrupt
620 # error "Todo: Implement PAE Address space switching"
622 __asm__ __volatile__ ("mov %0, %%cr3"::"a"(thread->MemState.CR3)); // switch address space
// Restore the incoming thread's ESP/EBP and jump to its saved EIP;
// EAX carries SWITCH_MAGIC so the resumed code knows a switch occurred.
625 __asm__ __volatile__ (
626 "mov %1, %%esp\n\t" // Restore ESP
627 "mov %2, %%ebp\n\t" // and EBP
628 "jmp *%3" : : // And return to where we saved state (Proc_Clone or Proc_Scheduler)
629 "a"(SWITCH_MAGIC), "b"(thread->SavedState.ESP),
630 "d"(thread->SavedState.EBP), "c"(thread->SavedState.EIP)
632 for(;;); // Shouldn't reach here
// Export Proc_SpawnWorker for use by loadable kernel modules.
636 EXPORT(Proc_SpawnWorker);