2 * AcessOS Microkernel Version
// --- Build options ---
// Set non-zero to log every context switch (used by Proc_Scheduler below)
14 #define DEBUG_TRACE_SWITCH 0
// Sentinel loaded into EAX before jumping back into a saved context; the
// resumed code compares its "return EIP" against this to detect that it
// woke up via the scheduler rather than by a direct call.
17 #define SWITCH_MAGIC 0xFFFACE55 // There is no code in this area
// PIT divisor: 1193182 Hz / 11931 ~= 100 Hz scheduler tick
18 #define TIMER_DIVISOR 11931 //~100Hz
// === IMPORTS ===
22 extern Uint GetEIP(); // start.asm
23 extern Uint32 gaInitPageDir[1024]; // start.asm
// Symbol marking the top of the boot-time kernel stack (linker/asm defined)
24 extern void Kernel_Stack_Top;
// Scheduler checks this before switching; non-zero means the thread lists
// are mid-update and must not be touched (see Proc_Scheduler)
25 extern volatile int giThreadListLock;
28 extern int giTotalTickets;
29 extern int giNumActiveThreads;
// Statically-initialised TCB for the initial kernel thread (thread 0)
30 extern tThread gThreadZero;
31 extern tThread *gActiveThreads;
32 extern tThread *gSleepingThreads;
// Queue of dead threads reaped by the scheduler's delete pass
33 extern tThread *gDeleteThreads;
34 extern tThread *Threads_GetNextToRun(int CPU);
35 extern void Threads_Dump();
36 extern tThread *Threads_CloneTCB(Uint *Err, Uint Flags);
// === PROTOTYPES ===
40 void ArchThreads_Init();
41 tThread *Proc_GetCurThread();
42 void Proc_ChangeStack();
43 int Proc_Clone(Uint *Err, Uint Flags);
44 void Proc_Scheduler();
47 // --- Current State ---
// NOTE(review): the two gCurrentThread declarations below are the
// multi-processor (per-CPU array) and uniprocessor variants; the
// #if/#else that selects between them appears to be missing from this
// extract -- confirm against the full source before editing.
49 tThread *gCurrentThread[MAX_CPUS] = {NULL};
51 tThread *gCurrentThread = NULL;
53 // --- Multiprocessing ---
// MP Floating Pointer structure, found by scanning BIOS regions in
// ArchThreads_Init; stays NULL on non-MP systems
55 tMPInfo *gMPTable = NULL;
58 Uint32 *gPML4s[4] = NULL;
64 // --- Error Recovery ---
// Dedicated stack + TSS used by the double-fault task gate so a fault on
// the normal kernel stack can still be handled.
65 char gaDoubleFaultStack[1024];
66 tTSS gDoubleFault_TSS = {
// NOTE(review): &stack[1023] is the last *byte*, so ESP0 starts 1 byte
// below the end and is not 4-byte aligned -- &stack[1024] may be intended.
67 .ESP0 = (Uint)&gaDoubleFaultStack[1023],
74 * \fn void ArchThreads_Init()
75 * \brief Starts the process scheduler
// Performs one-time arch setup: locates the MP table, builds the
// double-fault and per-CPU TSS descriptors, installs thread zero as the
// current thread, and programs the PIT to the scheduler tick rate.
77 void ArchThreads_Init()
81 // -- Initialise Multiprocessing
82 // Find MP Floating Table
// Scan 1: last KiB of conventional memory (EBDA area, 0x9FC00-0x9FFFF),
// checking the signature and validating with a zero byte-sum checksum
84 for(pos = KERNEL_BASE|0x9FC00; pos < (KERNEL_BASE|0xA0000); pos += 16) {
85 if( *(Uint*)(pos) == MPTABLE_IDENT ) {
86 if(ByteSum( (void*)pos, sizeof(tMPInfo) ) != 0) continue;
87 gMPTable = (void*)pos;
// Scan 2: BIOS ROM area 0xF0000-0xFFFFF, same signature + checksum test
97 for(pos = KERNEL_BASE|0xF0000; pos < (KERNEL_BASE|0x100000); pos += 16) {
98 if( *(Uint*)(pos) == MPTABLE_IDENT ) {
99 if(ByteSum( (void*)pos, sizeof(tMPInfo) ) != 0) continue;
100 gMPTable = (void*)pos;
106 // If the MP Table Exists, parse it
// Parsing is a stub: finding a table on real MP hardware panics here
109 Panic("Uh oh... MP Table Parsing is unimplemented\n");
118 // Initialise Double Fault TSS
// GDT entry 5: 32-bit available TSS descriptor (Access 0x89) pointing at
// gDoubleFault_TSS, base split across the three descriptor fields
120 gGDT[5].LimitLow = sizeof(tTSS);
122 gGDT[5].Access = 0x89; // Type
125 gGDT[5].BaseLow = (Uint)&gDoubleFault_TSS & 0xFFFF;
126 gGDT[5].BaseMid = (Uint)&gDoubleFault_TSS >> 16;
127 gGDT[5].BaseHi = (Uint)&gDoubleFault_TSS >> 24;
130 // Initialise Normal TSS(s)
// One TSS per CPU in GDT entries 6..6+giNumCPUs-1; only SS0/ESP0 matter
// since the TSS is used solely for ring3->ring0 stack switching
131 for(pos=0;pos<giNumCPUs;pos++)
136 gTSSs[pos].SS0 = 0x10;
137 gTSSs[pos].ESP0 = 0; // Set properly by scheduler
138 gGDT[6+pos].BaseLow = ((Uint)(&gTSSs[pos])) & 0xFFFF;
139 gGDT[6+pos].BaseMid = ((Uint)(&gTSSs[pos])) >> 16;
140 gGDT[6+pos].BaseHi = ((Uint)(&gTSSs[pos])) >> 24;
142 gGDT[6+pos].LimitLow = sizeof(tTSS);
143 gGDT[6+pos].LimitHi = 0;
144 gGDT[6+pos].Access = 0x89; // Type
145 gGDT[6+pos].Flags = 0x4;
// Load the task register with each CPU's TSS selector
// (entry 6 => selector 0x30; +8 bytes per subsequent CPU)
149 for(pos=0;pos<giNumCPUs;pos++) {
151 __asm__ __volatile__ ("ltr %%ax"::"a"(0x30+pos*8));
// Install thread zero as the running thread
// NOTE(review): the array and scalar assignments below are the MP and
// uniprocessor variants; #if guards are missing from this extract
157 gCurrentThread[0] = &gThreadZero;
159 gCurrentThread = &gThreadZero;
163 gThreadZero.MemState.PDP[0] = 0;
164 gThreadZero.MemState.PDP[1] = 0;
165 gThreadZero.MemState.PDP[2] = 0;
// CR3 holds a physical address, hence the KERNEL_BASE subtraction
167 gThreadZero.MemState.CR3 = (Uint)gaInitPageDir - KERNEL_BASE;
170 // Set timer frequency
// PIT command 0x34 = channel 0, lobyte/hibyte access, mode 2 (rate gen);
// divisor 11931 gives the ~100 Hz scheduler tick
171 outb(0x43, 0x34); // Set Channel 0, Low/High, Rate Generator
172 outb(0x40, TIMER_DIVISOR&0xFF); // Low Byte of Divisor
173 outb(0x40, (TIMER_DIVISOR>>8)&0xFF); // High Byte
175 // Create Per-Process Data Block
176 MM_Allocate(MM_PPD_CFG);
183 * \fn void Proc_Start()
184 * \brief Start process scheduler
// Enabling interrupts lets the PIT tick fire, which drives Proc_Scheduler
188 // Start Interrupts (and hence scheduler)
189 __asm__ __volatile__("sti");
193 * \fn tThread *Proc_GetCurThread()
194 * \brief Gets the current thread
// Returns the TCB of the thread running on this CPU (uniprocessor form
// shown; the per-CPU variant is behind a missing #if in this extract)
196 tThread *Proc_GetCurThread()
201 return gCurrentThread;
206 * \fn void Proc_ChangeStack()
207 * \brief Swaps the current stack for a new one (in the proper stack region)
// Copies the in-use portion of the boot stack into a freshly allocated
// kernel stack, patches saved frame pointers, then switches ESP/EBP over.
// Must be called before anything holds long-lived pointers into the old
// stack that this fixup pass cannot find.
209 void Proc_ChangeStack()
213 Uint curBase, newBase;
// Snapshot current stack/frame pointers
215 __asm__ __volatile__ ("mov %%esp, %0":"=r"(esp));
216 __asm__ __volatile__ ("mov %%ebp, %0":"=r"(ebp));
221 newBase = MM_NewKStack();
// Fatal: the kernel cannot run without its initial stack
224 Panic("What the?? Unable to allocate space for initial kernel stack");
228 curBase = (Uint)&Kernel_Stack_Top;
230 LOG("curBase = 0x%x, newBase = 0x%x", curBase, newBase);
232 // Get ESP as a used size
// i.e. esp becomes (bytes in use below the stack top), so base-esp maps
// the same used region in either stack
234 LOG("memcpy( %p, %p, 0x%x )", (void*)(newBase - esp), (void*)(curBase - esp), esp );
236 memcpy( (void*)(newBase - esp), (void*)(curBase - esp), esp );
237 // Get ESP as an offset in the new stack
// Translate EBP to the equivalent position in the new stack
240 ebp = newBase - (curBase - ebp);
242 // Repair EBPs & Stack Addresses
243 // Catches arguments also, but may trash stack-address-like values
// Walk every word of the live region; anything that looks like a pointer
// into the old stack gets rebased by the stack-to-stack delta
244 for(tmpEbp = esp; tmpEbp < newBase; tmpEbp += 4)
246 if(oldEsp < *(Uint*)tmpEbp && *(Uint*)tmpEbp < curBase)
247 *(Uint*)tmpEbp += newBase - curBase;
// Record the new stack and switch to it; ESP first, then EBP
250 gCurrentThread->KernelStack = newBase;
252 __asm__ __volatile__ ("mov %0, %%esp"::"r"(esp));
253 __asm__ __volatile__ ("mov %0, %%ebp"::"r"(ebp));
257 * \fn int Proc_Clone(Uint *Err, Uint Flags)
258 * \brief Clone the current process
// Forks the current thread. Returns the child's TID in the parent, and
// (via the SWITCH_MAGIC path) returns as the child when the scheduler
// first runs it. On TCB-clone failure returns -1 with *Err set by
// Threads_CloneTCB.
260 int Proc_Clone(Uint *Err, Uint Flags)
263 tThread *cur = Proc_GetCurThread();
// Capture the parent's stack/frame pointers for the child's saved state
266 __asm__ __volatile__ ("mov %%esp, %0": "=r"(esp));
267 __asm__ __volatile__ ("mov %%ebp, %0": "=r"(ebp));
269 newThread = Threads_CloneTCB(Err, Flags);
270 if(!newThread) return -1;
272 // Initialise Memory Space (New Addr space or kernel stack)
// CLONE_VM: give the child a copied address space but SHARE the kernel
// stack (the child runs in its own CR3, so the shared stack is safe)
273 if(Flags & CLONE_VM) {
274 newThread->MemState.CR3 = MM_Clone();
275 newThread->KernelStack = cur->KernelStack;
// else-branch: same address space, so the child needs its own kernel
// stack, built by the same copy-and-fixup scheme as Proc_ChangeStack
277 Uint tmpEbp, oldEsp = esp;
280 newThread->MemState.CR3 = cur->MemState.CR3;
283 newThread->KernelStack = MM_NewKStack();
285 if(newThread->KernelStack == 0) {
290 // Get ESP as a used size
291 esp = cur->KernelStack - esp;
293 memcpy( (void*)(newThread->KernelStack - esp), (void*)(cur->KernelStack - esp), esp );
294 // Get ESP as an offset in the new stack
295 esp = newThread->KernelStack - esp;
297 ebp = newThread->KernelStack - (cur->KernelStack - ebp);
299 // Repair EBPs & Stack Addresses
300 // Catches arguments also, but may trash stack-address-like values
// Rebase any word that looks like a pointer into the parent's stack
301 for(tmpEbp = esp; tmpEbp < newThread->KernelStack; tmpEbp += 4)
303 if(oldEsp < *(Uint*)tmpEbp && *(Uint*)tmpEbp < cur->KernelStack)
304 *(Uint*)tmpEbp += newThread->KernelStack - cur->KernelStack;
308 // Save core machine state
309 newThread->SavedState.ESP = esp;
310 newThread->SavedState.EBP = ebp;
// eip comes from GetEIP() (call missing from this extract). When the
// scheduler resumes the child here, EAX holds SWITCH_MAGIC: ACK the
// timer IRQ and take the child return path.
312 if(eip == SWITCH_MAGIC) {
313 outb(0x20, 0x20); // ACK Timer and return as child
318 newThread->SavedState.EIP = eip;
320 // Lock list and add to active
321 Threads_AddActive(newThread);
323 return newThread->TID;
327 * \fn int Proc_SpawnWorker()
328 * \brief Spawns a new worker thread
// Creates a kernel worker thread by copying gThreadZero's TCB, giving it
// a fresh worker stack, and mirroring the caller's ESP/EBP into it.
330 int Proc_SpawnWorker()
335 cur = Proc_GetCurThread();
// Allocate the TCB; on OOM warn and bail (error return is in a line
// missing from this extract)
338 new = malloc( sizeof(tThread) );
340 Warning("Proc_SpawnWorker - Out of heap space!\n");
// Start from thread zero's template TCB, then give it a unique TID
343 memcpy(new, &gThreadZero, sizeof(tThread));
345 new->TID = giNextTID++;
346 // Create a new worker stack (in PID0's address space)
347 // The stack is relocated by this code
348 new->KernelStack = MM_NewWorkerStack();
350 // Get ESP and EBP based in the new stack
351 __asm__ __volatile__ ("mov %%esp, %0": "=r"(esp));
352 __asm__ __volatile__ ("mov %%ebp, %0": "=r"(ebp));
// Translate the caller's ESP/EBP to the same offsets in the worker stack
353 esp = new->KernelStack - (cur->KernelStack - esp);
354 ebp = new->KernelStack - (cur->KernelStack - ebp);
356 // Save core machine state
357 new->SavedState.ESP = esp;
358 new->SavedState.EBP = ebp;
// eip from GetEIP() (call missing from extract); SWITCH_MAGIC in EAX
// means we were resumed as the worker -- ACK the timer and return there
360 if(eip == SWITCH_MAGIC) {
361 outb(0x20, 0x20); // ACK Timer and return as child
366 new->SavedState.EIP = eip;
// Mark runnable and queue for scheduling
368 new->Status = THREAD_STAT_ACTIVE;
369 Threads_AddActive( new );
375 * \fn Uint Proc_MakeUserStack()
376 * \brief Creates a new user stack
378 Uint Proc_MakeUserStack()
381 Uint base = USER_STACK_TOP - USER_STACK_SZ;
383 // Check Prospective Space
384 for( i = USER_STACK_SZ >> 12; i--; )
385 if( MM_GetPhysAddr( base + (i<<12) ) != 0 )
388 if(i != -1) return 0;
390 // Allocate Stack - Allocate incrementally to clean up MM_Dump output
391 for( i = 0; i < USER_STACK_SZ/4069; i++ )
392 MM_Allocate( base + (i<<12) );
394 return base + USER_STACK_SZ;
399 * \fn void Proc_StartUser(Uint Entrypoint, Uint *Bases, int ArgC, char **ArgV, char **EnvP, int DataSize)
400 * \brief Starts a user task
// Builds a user stack containing the argument/environment block and a
// hand-made IRET frame plus POPAD register block, then drops to ring 3
// by loading ESP and executing popad/iret (tail of the asm is missing
// from this extract). Never returns.
402 void Proc_StartUser(Uint Entrypoint, Uint *Bases, int ArgC, char **ArgV, char **EnvP, int DataSize)
404 Uint *stack = (void*)Proc_MakeUserStack();
409 LOG("stack = 0x%x", stack);
// Copy the DataSize-byte block (argv strings etc., starting at ArgV)
// onto the top of the new user stack
412 stack = (void*)( (Uint)stack - DataSize );
413 memcpy( stack, ArgV, DataSize );
415 // Adjust Arguments and environment
// The block moved by `delta` bytes, so every pointer inside it must be
// rebased by the same amount
416 delta = (Uint)stack - (Uint)ArgV;
417 ArgV = (char**)stack;
418 for( i = 0; ArgV[i]; i++ ) ArgV[i] += delta;
421 for( i = 0; EnvP[i]; i++ ) EnvP[i] += delta;
423 // User Mode Segments
// Ring-3 selectors: SS=0x23 (data|RPL3), CS=0x1B (code|RPL3)
424 ss = 0x23; cs = 0x1B;
// Push main()'s arguments: envp, argv, argc (cdecl order)
427 *--stack = (Uint)EnvP;
428 *--stack = (Uint)ArgV;
429 *--stack = (Uint)ArgC;
// Fake return address: user main must exit via syscall, not `ret`
432 *--stack = 0; // Return Address
433 delta = (Uint)stack; // Reuse delta to save SP
// IRET frame for an inter-privilege return: SS, ESP, EFLAGS, CS, EIP
435 *--stack = ss; //Stack Segment
436 *--stack = delta; //Stack Pointer
437 *--stack = 0x0202; //EFLAGS (Resvd (0x2) and IF (0x20))
438 *--stack = cs; //Code Segment
439 *--stack = Entrypoint; //EIP
// POPAD block: recognisable garbage patterns make stack dumps readable
441 *--stack = 0xAAAAAAAA; // eax
442 *--stack = 0xCCCCCCCC; // ecx
443 *--stack = 0xDDDDDDDD; // edx
444 *--stack = 0xBBBBBBBB; // ebx
445 *--stack = 0xD1D1D1D1; // edi
446 *--stack = 0x54545454; // esp - NOT POPPED
447 *--stack = 0x51515151; // esi
448 *--stack = 0xB4B4B4B4; // ebp
// Point ESP at the frame we just built; the (missing) remainder of this
// asm pops segment registers/GPRs and executes iret into user mode
455 __asm__ __volatile__ (
456 "mov %%eax,%%esp;\n\t" // Set stack pointer
462 "iret;\n\t" : : "a" (stack));
467 * \fn int Proc_Demote(Uint *Err, int Dest, tRegs *Regs)
468 * \brief Demotes a process to a lower permission level
469 * \param Err Pointer to user's errno
470 * \param Dest New Permission Level
471 * \param Regs Pointer to user's register structure
473 int Proc_Demote(Uint *Err, int Dest, tRegs *Regs)
// Current privilege level is the RPL bits of the saved CS
475 int cpl = Regs->cs & 3;
// Reject levels outside ring 0..3 (error return is in lines missing
// from this extract)
477 if(Dest > 3 || Dest < 0) {
488 // Change the Segment Registers
// Selector arithmetic: ((Dest+1)<<4)|Dest yields the data selector for
// ring Dest in this GDT layout; the code selector sits 8 bytes (one
// descriptor) below it. NOTE(review): confirm against the GDT set up in
// ArchThreads_Init -- layout is not visible here.
489 Regs->cs = (((Dest+1)<<4) | Dest) - 8;
490 Regs->ss = ((Dest+1)<<4) | Dest;
491 // Check if the GP Segs are GDT, then change them
// Bit 2 of a selector is the Table Indicator; clear = GDT. LDT-based
// segments are left untouched.
492 if(!(Regs->ds & 4)) Regs->ds = ((Dest+1)<<4) | Dest;
493 if(!(Regs->es & 4)) Regs->es = ((Dest+1)<<4) | Dest;
494 if(!(Regs->fs & 4)) Regs->fs = ((Dest+1)<<4) | Dest;
495 if(!(Regs->gs & 4)) Regs->gs = ((Dest+1)<<4) | Dest;
501 * \fn void Proc_Scheduler(int CPU)
502 * \brief Swap current thread and clears dead threads
// Called from the timer interrupt. Reaps the delete queue, enforces the
// per-thread quantum, saves the outgoing thread's ESP/EBP/EIP, then
// restores the next thread's context and jumps into it with
// EAX=SWITCH_MAGIC so the resumed code can tell it was switched to.
504 void Proc_Scheduler(int CPU)
509 // If the spinlock is set, let it complete
// Thread lists are being modified elsewhere; skip this tick entirely
510 if(giThreadListLock) return;
512 // Clear Delete Queue
513 while(gDeleteThreads)
515 thread = gDeleteThreads->Next;
// NOTE(review): the comment says "only free if unused" but the
// condition frees when IsLocked is TRUE -- this looks inverted (and the
// other branch drops the TCB without freeing it, leaking it). Confirm
// the IsLocked semantics before changing.
516 if(gDeleteThreads->IsLocked) { // Only free if structure is unused
517 gDeleteThreads->Status = THREAD_STAT_NULL;
518 free( gDeleteThreads );
520 gDeleteThreads = thread;
523 // Check if there is any tasks running
// Nothing runnable: halt until the next interrupt instead of switching
524 if(giNumActiveThreads == 0) {
525 Log("No Active threads, sleeping");
526 __asm__ __volatile__ ("hlt");
530 // Reduce remaining quantum and continue timeslice if non-zero
531 if(gCurrentThread->Remaining--) return;
532 // Reset quantum for next call
533 gCurrentThread->Remaining = gCurrentThread->Quantum;
// Capture the outgoing context; eip comes from GetEIP() (call missing
// from this extract)
536 __asm__ __volatile__ ("mov %%esp, %0":"=r"(esp));
537 __asm__ __volatile__ ("mov %%ebp, %0":"=r"(ebp));
// If EIP reads as SWITCH_MAGIC we are the thread being switched TO,
// resuming below GetEIP -- just return to the interrupted code
539 if(eip == SWITCH_MAGIC) return; // Check if a switch happened
541 // Save machine state
542 gCurrentThread->SavedState.ESP = esp;
543 gCurrentThread->SavedState.EBP = ebp;
544 gCurrentThread->SavedState.EIP = eip;
// Pick the next thread for this CPU
547 thread = Threads_GetNextToRun(CPU);
551 Warning("Hmm... Threads_GetNextToRun returned NULL, I don't think this should happen.\n");
555 #if DEBUG_TRACE_SWITCH
556 Log("Switching to task %i, CR3 = 0x%x, EIP = %p",
558 thread->MemState.CR3,
559 thread->SavedState.EIP
563 // Set current thread
564 gCurrentThread = thread;
566 // Update Kernel Stack pointer
// Ring3->ring0 transitions on this CPU will now land on the new
// thread's kernel stack
567 gTSSs[CPU].ESP0 = thread->KernelStack;
// Switch address space (CR3 == 0 means keep the current one)
570 if( gCurrentThread->MemState.CR3 != 0 )
571 __asm__ __volatile__ ("mov %0, %%cr3"::"a"(gCurrentThread->MemState.CR3));
// Restore ESP/EBP and jump to the saved EIP with EAX=SWITCH_MAGIC;
// the landing site (Proc_Clone / Proc_SpawnWorker / above) tests EAX
573 __asm__ __volatile__ (
574 "mov %1, %%esp\n\t" // Restore ESP
575 "mov %2, %%ebp\n\t" // and EBP
576 "jmp *%3" : : // And return to where we saved state (Proc_Clone or Proc_Scheduler)
577 "a"(SWITCH_MAGIC), "b"(gCurrentThread->SavedState.ESP),
578 "d"(gCurrentThread->SavedState.EBP), "c"(gCurrentThread->SavedState.EIP)
580 for(;;); // Shouldn't reach here
// === EXPORTS ===
// Make the worker-thread spawner available to loadable modules
584 EXPORT(Proc_SpawnWorker);