16 #define DEBUG_TRACE_SWITCH 0
19 #define SWITCH_MAGIC 0x55ECAFFF##FFFACE55 // There is no code in this area
21 #define TIMER_DIVISOR 11931 //~100Hz
27 Uint8 State; // 0: Unavaliable, 1: Idle, 2: Active
35 extern void APStartup(void); // 16-bit AP startup code
36 extern Uint GetRIP(void); // start.asm
37 extern Uint64 gInitialPML4[512]; // start.asm
38 extern char gInitialKernelStack[];
39 extern tShortSpinlock glThreadListLock;
42 extern int giTotalTickets;
43 extern int giNumActiveThreads;
44 extern tThread gThreadZero;
45 extern void Threads_Dump(void);
46 extern void Proc_ReturnToUser(void);
47 extern int GetCPUNum(void);
50 void ArchThreads_Init(void);
52 void MP_StartAP(int CPU);
53 void MP_SendIPI(Uint8 APICID, int Vector, int DeliveryMode);
55 //void Proc_Start(void);
56 //tThread *Proc_GetCurThread(void);
57 void Proc_ChangeStack(void);
58 // int Proc_Clone(Uint *Err, Uint Flags);
59 // int Proc_SpawnWorker(void);
60 Uint Proc_MakeUserStack(void);
61 void Proc_StartUser(Uint Entrypoint, Uint *Bases, int ArgC, char **ArgV, char **EnvP, int DataSize);
62 void Proc_StartProcess(Uint16 SS, Uint Stack, Uint Flags, Uint16 CS, Uint IP);
63 int Proc_Demote(Uint *Err, int Dest, tRegs *Regs);
64 void Proc_CallFaultHandler(tThread *Thread);
65 void Proc_Scheduler(int CPU);
// --- Multiprocessing ---
volatile int giNumInitingCPUs = 0;	// Count of APs still initialising; the BSP busy-waits on this
tMPInfo *gMPFloatPtr = NULL;	// MP Floating Pointer structure (NULL => none found / uniprocessor)
tAPIC *gpMP_LocalAPIC = NULL;	// Local APIC registers, mapped via MM_MapHWPage in ArchThreads_Init
Uint8 gaAPIC_to_CPU[256] = {0};	// Maps an APIC ID to a logical CPU index in gaCPUs
tCPU gaCPUs[MAX_CPUS];	// Per-CPU state (current thread, idle thread, APIC ID, ...)
// --- Error Recovery ---
Uint32 gaDoubleFaultStack[1024];	// Dedicated 4KiB stack for the double-fault handler
/**
 * \fn void ArchThreads_Init(void)
 * \brief Starts the process scheduler
 *
 * Scans the standard locations for the MP Floating Pointer structure,
 * dumps and parses the MP configuration table (starting any application
 * processors found), then initialises the per-CPU TSS descriptors, the
 * PIT timer, thread zero's state, and the per-process data block.
 *
 * NOTE(review): several lines of this routine (braces, declarations,
 * #if/#else guards) are elided in this view; comments describe only the
 * visible code.
 */
void ArchThreads_Init(void)
	// -- Initialise Multiprocessing
	// Find MP Floating Table
	// The structure is 16-byte aligned, starts with the MPPTR_IDENT
	// signature, and is valid when its bytes sum to zero.
	// - EBDA/Last 1Kib (640KiB)
	for(pos = KERNEL_BASE|0x9F000; pos < (KERNEL_BASE|0xA0000); pos += 16) {
		if( *(Uint*)(pos) == MPPTR_IDENT ) {
			Log("Possible %p", pos);
			if( ByteSum((void*)pos, sizeof(tMPInfo)) != 0 ) continue;
			gMPFloatPtr = (void*)pos;
	// - Last KiB (512KiB base mem)
	for(pos = KERNEL_BASE|0x7F000; pos < (KERNEL_BASE|0x80000); pos += 16) {
		if( *(Uint*)(pos) == MPPTR_IDENT ) {
			Log("Possible %p", pos);
			if( ByteSum((void*)pos, sizeof(tMPInfo)) != 0 ) continue;
			gMPFloatPtr = (void*)pos;
	// - BIOS ROM region (0xE0000 - 0xFFFFF)
	for(pos = KERNEL_BASE|0xE0000; pos < (KERNEL_BASE|0x100000); pos += 16) {
		if( *(Uint*)(pos) == MPPTR_IDENT ) {
			Log("Possible %p", pos);
			if( ByteSum((void*)pos, sizeof(tMPInfo)) != 0 ) continue;
			gMPFloatPtr = (void*)pos;
	// If the MP Table Exists, parse it
	// Dump the floating pointer structure for debugging.
	Log("gMPFloatPtr = %p", gMPFloatPtr);
	Log("*gMPFloatPtr = {");
	Log("\t.Sig = 0x%08x", gMPFloatPtr->Sig);
	Log("\t.MPConfig = 0x%08x", gMPFloatPtr->MPConfig);
	Log("\t.Length = 0x%02x", gMPFloatPtr->Length);
	Log("\t.Version = 0x%02x", gMPFloatPtr->Version);
	Log("\t.Checksum = 0x%02x", gMPFloatPtr->Checksum);
	Log("\t.Features = [0x%02x,0x%02x,0x%02x,0x%02x,0x%02x]",
		gMPFloatPtr->Features[0], gMPFloatPtr->Features[1],
		gMPFloatPtr->Features[2], gMPFloatPtr->Features[3],
		gMPFloatPtr->Features[4]
	// MPConfig is a physical pointer to the configuration table; low
	// memory is identity-mapped at KERNEL_BASE, so OR gives its virtual address.
	mptable = (void*)( KERNEL_BASE|gMPFloatPtr->MPConfig );
	Log("mptable = %p", mptable);
	Log("\t.Sig = 0x%08x", mptable->Sig);
	Log("\t.BaseTableLength = 0x%04x", mptable->BaseTableLength);
	Log("\t.SpecRev = 0x%02x", mptable->SpecRev);
	Log("\t.Checksum = 0x%02x", mptable->Checksum);
	Log("\t.OEMID = '%8c'", mptable->OemID);
	Log("\t.ProductID = '%8c'", mptable->ProductID);
	Log("\t.OEMTablePtr = %p'", mptable->OEMTablePtr);
	Log("\t.OEMTableSize = 0x%04x", mptable->OEMTableSize);
	Log("\t.EntryCount = 0x%04x", mptable->EntryCount);
	Log("\t.LocalAPICMemMap = 0x%08x", mptable->LocalAPICMemMap);
	Log("\t.ExtendedTableLen = 0x%04x", mptable->ExtendedTableLen);
	Log("\t.ExtendedTableChecksum = 0x%02x", mptable->ExtendedTableChecksum);
	// Map the Local APIC MMIO page so IPIs can be sent later (MP_SendIPI).
	gpMP_LocalAPIC = (void*)MM_MapHWPage(mptable->LocalAPICMemMap, 1);
	ents = mptable->Entries;
	// Walk the variable-length entry list; entry types follow the MP spec
	// (0 = Processor, 1 = Bus, 2 = I/O APIC, 3 = I/O Int, 4 = Local Int).
	for( i = 0; i < mptable->EntryCount; i ++ )
			Log("%i: Processor", i);
			Log("\t.APICID = %i", ents->Proc.APICID);
			Log("\t.APICVer = 0x%02x", ents->Proc.APICVer);
			Log("\t.CPUFlags = 0x%02x", ents->Proc.CPUFlags);
			Log("\t.CPUSignature = 0x%08x", ents->Proc.CPUSignature);
			Log("\t.FeatureFlags = 0x%08x", ents->Proc.FeatureFlags);
			// CPUFlags bit 0 clear => this processor is disabled; skip it.
			if( !(ents->Proc.CPUFlags & 1) ) {
			// Check if there is too many processors
			if(giNumCPUs >= MAX_CPUS) {
				giNumCPUs ++;	// If `giNumCPUs` > MAX_CPUS later, it will be clipped
			// Initialise CPU Info
			gaAPIC_to_CPU[ents->Proc.APICID] = giNumCPUs;
			gaCPUs[giNumCPUs].APICID = ents->Proc.APICID;
			gaCPUs[giNumCPUs].State = 0;
			// CPUFlags bit 1 set marks the bootstrap processor; everything
			// else is an AP that must be started explicitly.
			if( !(ents->Proc.CPUFlags & 2) )
				MP_StartAP( giNumCPUs-1 );
			Log("\t.ID = %i", ents->Bus.ID);
			Log("\t.TypeString = '%6c'", ents->Bus.TypeString);
			Log("%i: I/O APIC", i);
			Log("\t.ID = %i", ents->IOAPIC.ID);
			Log("\t.Version = 0x%02x", ents->IOAPIC.Version);
			Log("\t.Flags = 0x%02x", ents->IOAPIC.Flags);
			Log("\t.Addr = 0x%08x", ents->IOAPIC.Addr);
		case 3:	// I/O Interrupt Assignment
			Log("%i: I/O Interrupt Assignment", i);
			Log("\t.IntType = %i", ents->IOInt.IntType);
			Log("\t.Flags = 0x%04x", ents->IOInt.Flags);
			Log("\t.SourceBusID = 0x%02x", ents->IOInt.SourceBusID);
			Log("\t.SourceBusIRQ = 0x%02x", ents->IOInt.SourceBusIRQ);
			Log("\t.DestAPICID = 0x%02x", ents->IOInt.DestAPICID);
			Log("\t.DestAPICIRQ = 0x%02x", ents->IOInt.DestAPICIRQ);
		case 4:	// Local Interrupt Assignment
			Log("%i: Local Interrupt Assignment", i);
			Log("\t.IntType = %i", ents->LocalInt.IntType);
			Log("\t.Flags = 0x%04x", ents->LocalInt.Flags);
			Log("\t.SourceBusID = 0x%02x", ents->LocalInt.SourceBusID);
			Log("\t.SourceBusIRQ = 0x%02x", ents->LocalInt.SourceBusIRQ);
			Log("\t.DestLocalAPICID = 0x%02x", ents->LocalInt.DestLocalAPICID);
			Log("\t.DestLocalAPICIRQ = 0x%02x", ents->LocalInt.DestLocalAPICIRQ);
			Log("%i: Unknown (%i)", i, ents->Type);
		// Advance by this entry's size (entSize set per-type; elided here).
		ents = (void*)( (Uint)ents + entSize );
	// Clip the CPU count if the table advertised more than we support.
	if( giNumCPUs > MAX_CPUS ) {
		Warning("Too many CPUs detected (%i), only using %i of them", giNumCPUs, MAX_CPUS);
		giNumCPUs = MAX_CPUS;
	// Wait until every AP has finished its early init.
	while( giNumInitingCPUs )
	MM_FinishVirtualInit();
	Panic("Uh oh... MP Table Parsing is unimplemented\n");
	// No MPFP structure found anywhere: fall back to a single CPU.
	Log("No MP Table was found, assuming uniprocessor\n");
	MM_FinishVirtualInit();
	// Initialise Normal TSS(s)
	// On x86_64 each TSS descriptor occupies two 8-byte GDT slots
	// (indices 7+2n and 8+2n); the second slot holds the base's top 32 bits.
	for(pos=0;pos<giNumCPUs;pos++)
		gTSSs[pos].RSP0 = 0;	// Set properly by scheduler
		gGDT[7+pos*2].LimitLow = sizeof(tTSS) & 0xFFFF;
		gGDT[7+pos*2].BaseLow = ((Uint)(&gTSSs[pos])) & 0xFFFF;
		gGDT[7+pos*2].BaseMid = ((Uint)(&gTSSs[pos])) >> 16;
		gGDT[7+pos*2].BaseHi = ((Uint)(&gTSSs[pos])) >> 24;
		gGDT[7+pos*2+1].DWord[0] = ((Uint)(&gTSSs[pos])) >> 32;
	// Load the task register; selector 0x38 is GDT index 7, stride 16 bytes
	// per CPU matches the two-slot descriptors above.
	for(pos=0;pos<giNumCPUs;pos++) {
		__asm__ __volatile__ ("ltr %%ax"::"a"(0x38+pos*16));
	// Set Debug registers
	// NOTE(review): DR0 is loaded with the current thread pointer and DR1
	// zeroed — presumably read back by GetCPUNum/Proc_GetCurThread in
	// start.asm; confirm against that file.
	__asm__ __volatile__ ("mov %0, %%db0" : : "r"(&gThreadZero));
	__asm__ __volatile__ ("mov %%rax, %%db1" : : "a"(0));
	gaCPUs[0].Current = &gThreadZero;
	// CR3 holds a physical address; gInitialPML4 is a kernel virtual
	// address, so subtract the kernel base.
	gThreadZero.MemState.CR3 = (Uint)gInitialPML4 - KERNEL_BASE;
	gThreadZero.CurCPU = 0;
	// Set timer frequency
	outb(0x43, 0x34);	// Set Channel 0, Low/High, Rate Generator
	outb(0x40, TIMER_DIVISOR&0xFF);	// Low Byte of Divisor
	outb(0x40, (TIMER_DIVISOR>>8)&0xFF);	// High Byte
	// Create Per-Process Data Block
	if( !MM_Allocate(MM_PPD_CFG) )
		Warning("Oh, hell, Unable to allocate PPD for Thread#0");
	Log("Multithreading initialised");
/**
 * \brief Starts an Application Processor
 * \param CPU	Index into gaCPUs of the AP to start
 *
 * Programs the BIOS warm-reset vector so the AP begins executing the
 * 16-bit APStartup stub, then kicks it with an IPI (delivery mode 5 = INIT).
 */
void MP_StartAP(int CPU)
	Log("Starting AP %i (APIC %i)", CPU, gaCPUs[CPU].APICID);
	// Set location of AP startup code and mark for a warm restart
	// Warm-reset vector lives at physical 40:67; with segment 0xFFFF
	// (base 0xFFFF0), the offset is APStartup's distance from 0xFFFF0.
	*(Uint16*)(KERNEL_BASE|0x467) = (Uint)&APStartup - (KERNEL_BASE|0xFFFF0);
	*(Uint16*)(KERNEL_BASE|0x469) = 0xFFFF;
	// CMOS shutdown-status byte 0x0F = 0x0A => jump via the 40:67 vector.
	outb(0x70, 0x0F); outb(0x71, 0x0A);	// Warm Reset
	MP_SendIPI(gaCPUs[CPU].APICID, 0, 5);
332 void MP_SendIPI(Uint8 APICID, int Vector, int DeliveryMode)
334 Uint32 addr = (Uint)gpMP_LocalAPIC + 0x300;
338 val = (Uint)APICID << 24;
339 Log("*%p = 0x%08x", addr+0x10, val);
340 *(Uint32*)(addr+0x10) = val;
342 val = ((DeliveryMode & 7) << 8) | (Vector & 0xFF);
343 Log("*%p = 0x%08x", addr, val);
344 *(Uint32*)addr = val;
/**
 * \fn void Proc_Start(void)
 * \brief Start process scheduler
 *
 * Creates an idle thread for each CPU (the child side of a Proc_Clone
 * simply HLTs forever), hands the boot thread to CPU 0, then enables
 * interrupts so the timer-driven scheduler takes over.
 * NOTE(review): the MP and uniprocessor paths are both visible here; the
 * #if USE_MP guards separating them are elided in this view.
 */
void Proc_Start(void)
	// MP path: one idle thread per CPU.
	for( i = 0; i < giNumCPUs; i ++ )
		if(i) gaCPUs[i].Current = NULL;
		// Child of the clone (returns 0) becomes this CPU's idle loop.
		if( (tid = Proc_Clone(0, 0)) == 0)
			for(;;) HALT();	// Just yields
		gaCPUs[i].IdleThread = Threads_GetThread(tid);
		gaCPUs[i].IdleThread->ThreadName = "Idle Thread";
		Threads_SetTickets( gaCPUs[i].IdleThread, 0 );	// Never called randomly
		gaCPUs[i].IdleThread->Quantum = 1;	// 1 slice quantum
		// Non-boot CPUs get additional setup (elided in this view).
		if( i != giProc_BootProcessorID ) {
	// BSP still should run the current task
	gaCPUs[0].Current = &gThreadZero;
	// Start interrupts and wait for APs to come up
	Log("Waiting for APs to come up\n");
	__asm__ __volatile__ ("sti");
	while( giNumInitingCPUs ) __asm__ __volatile__ ("hlt");
	// Uniprocessor path: a single idle thread on CPU 0.
	if(Proc_Clone(0, 0) == 0)
		gaCPUs[0].IdleThread = Proc_GetCurThread();
		gaCPUs[0].IdleThread->ThreadName = (char*)"Idle Thread";
		Threads_SetPriority( gaCPUs[0].IdleThread, -1 );	// Never called randomly
		gaCPUs[0].IdleThread->Quantum = 1;	// 1 slice quantum
		for(;;) HALT();	// Just yields
	gaCPUs[0].Current = &gThreadZero;
	gaCPUs[0].Current->CurCPU = 0;
	// Start Interrupts (and hence scheduler)
	__asm__ __volatile__("sti");
	MM_FinishVirtualInit();
	Log("Multithreading started");
/**
 * \fn tThread *Proc_GetCurThread(void)
 * \brief Gets the current thread
 * \return The thread currently executing on this CPU
 *
 * NOTE(review): two return statements are visible — presumably an MP
 * build indexes by GetCPUNum() while a uniprocessor build uses CPU 0;
 * the #if/#else guards are elided in this view.
 */
tThread *Proc_GetCurThread(void)
	return gaCPUs[ GetCPUNum() ].Current;
	return gaCPUs[ 0 ].Current;
/**
 * \fn void Proc_ChangeStack(void)
 * \brief Swaps the current stack for a new one (in the proper stack region)
 *
 * Copies the in-use portion of the initial kernel stack into a freshly
 * allocated kernel stack, rebases any saved frame pointers / stack
 * addresses found in the copied data, then switches RSP/RBP over.
 */
void Proc_ChangeStack(void)
	Uint tmp_rbp, old_rsp;
	Uint curBase, newBase;
	// Capture the live stack and frame pointers.
	__asm__ __volatile__ ("mov %%rsp, %0":"=r"(rsp));
	__asm__ __volatile__ ("mov %%rbp, %0":"=r"(rbp));
	newBase = MM_NewKStack();
		Panic("What the?? Unable to allocate space for initial kernel stack");
	curBase = (Uint)&gInitialKernelStack;
	Log("curBase = 0x%x, newBase = 0x%x", curBase, newBase);
	// Get ESP as a used size
	// (stacks grow down, so base - rsp = number of bytes in use)
	Log("memcpy( %p, %p, 0x%x )", (void*)(newBase - rsp), (void*)(curBase - rsp), rsp );
	// Copy the used region to the equivalent offset in the new stack.
	memcpy( (void*)(newBase - rsp), (void*)(curBase - rsp), rsp );
	// Get ESP as an offset in the new stack
	rbp = newBase - (curBase - rbp);
	// Repair EBPs & Stack Addresses
	// Catches arguments also, but may trash stack-address-like values
	for(tmp_rbp = rsp; tmp_rbp < newBase; tmp_rbp += sizeof(Uint))
		// Any word that looks like an address inside the old stack's used
		// range is rebased onto the new stack.
		if(old_rsp < *(Uint*)tmp_rbp && *(Uint*)tmp_rbp < curBase)
			*(Uint*)tmp_rbp += newBase - curBase;
	Log("Applying Changes");
	Proc_GetCurThread()->KernelStack = newBase;
	// Switch to the new stack.
	__asm__ __volatile__ ("mov %0, %%rsp"::"r"(rsp));
	__asm__ __volatile__ ("mov %0, %%rbp"::"r"(rbp));
/**
 * \fn int Proc_Clone(Uint *Err, Uint Flags)
 * \brief Clone the current process
 * \param Err	Pointer to error-code return (passed through to Threads_CloneTCB)
 * \param Flags	CLONE_* flags; CLONE_VM clones the address space
 * \return New thread's TID in the parent; the child resumes via the
 *         scheduler (GetRIP() == SWITCH_MAGIC path); -1 on TCB allocation failure
 */
int Proc_Clone(Uint *Err, Uint Flags)
	tThread *cur = Proc_GetCurThread();
	// Capture RSP/RBP so the child can resume with an equivalent stack.
	__asm__ __volatile__ ("mov %%rsp, %0": "=r"(rsp));
	__asm__ __volatile__ ("mov %%rbp, %0": "=r"(rbp));
	newThread = Threads_CloneTCB(Err, Flags);
	if(!newThread) return -1;
	Log("Proc_Clone: newThread = %p", newThread);
	// Initialise Memory Space (New Addr space or kernel stack)
	if(Flags & CLONE_VM) {
		// New address space; kernel stack is shared with the parent.
		Log("Proc_Clone: Cloning VM");
		newThread->MemState.CR3 = MM_Clone();
		newThread->KernelStack = cur->KernelStack;
		Uint tmp_rbp, old_rsp = rsp;
		// Same address space, but a private copy of the kernel stack.
		newThread->MemState.CR3 = cur->MemState.CR3;
		newThread->KernelStack = MM_NewKStack();
		Log("Proc_Clone: newKStack = %p", newThread->KernelStack);
		if(newThread->KernelStack == 0) {
		// Get ESP as a used size
		rsp = cur->KernelStack - rsp;
		// Copy the used stack region into the new stack
		// (the memcpy call wrapping these arguments is elided in this view):
			(void*)(newThread->KernelStack - rsp),
			(void*)(cur->KernelStack - rsp),
		// Get ESP as an offset in the new stack
		rsp = newThread->KernelStack - rsp;
		rbp = newThread->KernelStack - (cur->KernelStack - rbp);
		// Repair EBPs & Stack Addresses
		// Catches arguments also, but may trash stack-address-like values
		for(tmp_rbp = rsp; tmp_rbp < newThread->KernelStack; tmp_rbp += sizeof(Uint))
			if(old_rsp < *(Uint*)tmp_rbp && *(Uint*)tmp_rbp < cur->KernelStack)
				*(Uint*)tmp_rbp += newThread->KernelStack - cur->KernelStack;
	// Save core machine state
	newThread->SavedState.RSP = rsp;
	newThread->SavedState.RBP = rbp;
	// GetRIP() returns SWITCH_MAGIC when we were resumed by the scheduler,
	// i.e. this is the child running for the first time.
	if(rip == SWITCH_MAGIC) {
		outb(0x20, 0x20);	// ACK Timer and return as child
	// Parent path: record where the child should start executing.
	newThread->SavedState.RIP = rip;
	// Lock list and add to active
	Threads_AddActive(newThread);
	return newThread->TID;
/**
 * \fn int Proc_SpawnWorker(void)
 * \brief Spawns a new worker thread
 * \return New worker's TID (return statement elided in this view)
 *
 * Workers run in PID 0's address space on a dedicated worker stack; the
 * TCB is allocated by hand from gThreadZero rather than via Threads_CloneTCB.
 */
int Proc_SpawnWorker(void)
	cur = Proc_GetCurThread();
	// Allocate and initialise the TCB from the thread-zero template.
	new = malloc( sizeof(tThread) );
		Warning("Proc_SpawnWorker - Out of heap space!\n");
	memcpy(new, &gThreadZero, sizeof(tThread));
	new->TID = giNextTID++;
	// Create a new worker stack (in PID0's address space)
	// The stack is relocated by this code
	new->KernelStack = MM_NewWorkerStack();
	// Get ESP and EBP based in the new stack
	__asm__ __volatile__ ("mov %%rsp, %0": "=r"(rsp));
	__asm__ __volatile__ ("mov %%rbp, %0": "=r"(rbp));
	// Translate current offsets-from-stack-top onto the worker stack.
	rsp = new->KernelStack - (cur->KernelStack - rsp);
	rbp = new->KernelStack - (cur->KernelStack - rbp);
	// Save core machine state
	new->SavedState.RSP = rsp;
	new->SavedState.RBP = rbp;
	// GetRIP() (call elided) returns SWITCH_MAGIC when this code runs as
	// the freshly scheduled worker.
	if(rip == SWITCH_MAGIC) {
		outb(0x20, 0x20);	// ACK Timer and return as child
	new->SavedState.RIP = rip;
	new->Status = THREAD_STAT_ACTIVE;
	Threads_AddActive( new );
/**
 * \fn Uint Proc_MakeUserStack(void)
 * \brief Creates a new user stack
 * \return Address of the TOP of the new stack, or 0 on failure
 */
Uint Proc_MakeUserStack(void)
	Uint base = USER_STACK_TOP - USER_STACK_SZ;
	// Check Prospective Space
	// Fail if any page in the candidate range is already mapped.
	for( i = USER_STACK_SZ >> 12; i--; )
		if( MM_GetPhysAddr( base + (i<<12) ) != 0 )
	// i only reaches -1 if the loop above found no mapped page.
	if(i != -1) return 0;
	// Allocate Stack - Allocate incrementally to clean up MM_Dump output
	for( i = 0; i < USER_STACK_SZ/0x1000; i++ )
		if( !MM_Allocate( base + (i<<12) ) )
			// Allocation failed part-way: report and roll back the pages
			// allocated so far (rollback loop partially elided in this view).
			Log_Error("Proc", "Unable to allocate user stack (%i pages requested)", USER_STACK_SZ/0x1000);
				MM_Deallocate( base + (i<<12) );
	// Return the top of the stack (stacks grow downward).
	return base + USER_STACK_SZ;
/**
 * \fn void Proc_StartUser(Uint Entrypoint, Uint *Bases, int ArgC, char **ArgV, char **EnvP, int DataSize)
 * \brief Starts a user task
 * \param Entrypoint	User-mode address to begin execution at
 * \param Bases	Unused in the visible code — TODO confirm purpose
 * \param ArgC	Number of command-line arguments
 * \param ArgV	Argument array; the DataSize bytes starting here (argv,
 *              envp and their strings) are copied onto the user stack
 * \param EnvP	Environment pointer array (within the ArgV data block)
 * \param DataSize	Total size of the argument/environment block, in bytes
 */
void Proc_StartUser(Uint Entrypoint, Uint *Bases, int ArgC, char **ArgV, char **EnvP, int DataSize)
	Uint *stack = (void*)Proc_MakeUserStack();
	LOG("stack = 0x%x", stack);
	// Reserve room at the stack top and copy the argument block there.
	stack = (void*)( (Uint)stack - DataSize );
	memcpy( stack, ArgV, DataSize );
	// Adjust Arguments and environment
	// Rebase every pointer in argv/envp by the distance the block moved.
	delta = (Uint)stack - (Uint)ArgV;
	ArgV = (char**)stack;
	for( i = 0; ArgV[i]; i++ ) ArgV[i] += delta;
	for( i = 0; EnvP[i]; i++ ) EnvP[i] += delta;
	// User Mode Segments
	// 0x23/0x1B = ring-3 data/code selectors (GDT slots 4 and 3, RPL 3).
	ss = 0x23; cs = 0x1B;
	// Push main()'s parameters in reverse order.
	*--stack = (Uint)EnvP;
	*--stack = (Uint)ArgV;
	*--stack = (Uint)ArgC;
	*--stack = 0;	// Return Address
	// 0x202 = RFLAGS with IF set plus the always-one reserved bit.
	Proc_StartProcess(ss, (Uint)stack, 0x202, cs, Entrypoint);
/**
 * \brief Jumps into a process (usually user mode) via a fabricated IRET frame
 * \param SS	Stack segment selector for the new context
 * \param Stack	Stack pointer for the new context (frame is built below it)
 * \param Flags	RFLAGS value (Resvd 0x2 and IF 0x20 expected)
 * \param CS	Code segment selector
 * \param IP	Instruction pointer to begin at (push elided in this view)
 */
void Proc_StartProcess(Uint16 SS, Uint Stack, Uint Flags, Uint16 CS, Uint IP)
	Uint *stack = (void*)Stack;
	// Build the IRET frame: SS, RSP, RFLAGS, CS (RIP push elided here).
	*--stack = SS;	//Stack Segment
	*--stack = Stack;	//Stack Pointer
	*--stack = Flags;	//EFLAGS (Resvd (0x2) and IF (0x20))
	*--stack = CS;	//Code Segment
	// Recognisable poison values to be popped into the GPRs.
	*--stack = 0xAAAAAAAA;	// eax
	*--stack = 0xCCCCCCCC;	// ecx
	*--stack = 0xDDDDDDDD;	// edx
	*--stack = 0xBBBBBBBB;	// ebx
	*--stack = 0xD1D1D1D1;	// edi
	*--stack = 0x54545454;	// rsp - NOT POPED
	*--stack = 0x51515151;	// esi
	*--stack = 0xB4B4B4B4;	// rbp
	// Point RSP at the frame and return through it.
	// NOTE(review): long mode normally requires `iretq`; plain `iret` may
	// assemble as the 32-bit operand-size form — confirm against the
	// assembler/port this was taken from.
	__asm__ __volatile__ (
	"mov %%rax,%%rsp;\n\t"	// Set stack pointer
	"iret;\n\t" : : "a" (stack));
/**
 * \fn int Proc_Demote(Uint *Err, int Dest, tRegs *Regs)
 * \brief Demotes a process to a lower permission level
 * \param Err	Pointer to user's errno
 * \param Dest	New Permission Level (ring 0-3)
 * \param Regs	Pointer to user's register structure
 */
int Proc_Demote(Uint *Err, int Dest, tRegs *Regs)
	// Current privilege level is the RPL bits of the saved CS.
	int cpl = Regs->CS & 3;
	// Validate the destination ring.
	if(Dest > 3 || Dest < 0) {
	// Change the Segment Registers
	// Selector layout: ring-specific GDT slot OR'd with the RPL; CS sits
	// one descriptor (8 bytes) below the matching SS.
	Regs->CS = (((Dest+1)<<4) | Dest) - 8;
	Regs->SS = ((Dest+1)<<4) | Dest;
/**
 * \brief Calls a signal handler in user mode
 * \param Thread	Thread whose FaultHandler should be invoked
 * \note Used for signals
 */
void Proc_CallFaultHandler(tThread *Thread)
	// Rewinds the stack and calls the user function
	// The handler address is passed in RBP; Proc_ReturnToUser (start.asm)
	// performs the actual transition to user mode.
	__asm__ __volatile__ ("mov %0, %%rbp;\n\tcall Proc_ReturnToUser" :: "r"(Thread->FaultHandler));
/**
 * \fn void Proc_Scheduler(int CPU)
 * \brief Swap current thread and clears dead threads
 * \param CPU	Logical index of the CPU being scheduled
 *
 * Invoked from the timer tick: decrements the running thread's quantum,
 * and when it expires saves its RSP/RBP/RIP, picks the next runnable
 * thread (or the idle thread) and switches stack + address space to it.
 */
void Proc_Scheduler(int CPU)
	// If the spinlock is set, let it complete
	if(IS_LOCKED(&glThreadListLock)) return;
	// Get current thread
	thread = gaCPUs[CPU].Current;
	// Reduce remaining quantum and continue timeslice if non-zero
	if(thread->Remaining--) return;
	// Reset quantum for next call
	thread->Remaining = thread->Quantum;
	// Capture the outgoing thread's stack state.
	__asm__ __volatile__ ("mov %%rsp, %0":"=r"(rsp));
	__asm__ __volatile__ ("mov %%rbp, %0":"=r"(rbp));
	// GetRIP() (call elided) returns SWITCH_MAGIC when we were resumed by
	// the jmp at the bottom of this function.
	if(rip == SWITCH_MAGIC) return;	// Check if a switch happened
	// Save machine state
	thread->SavedState.RSP = rsp;
	thread->SavedState.RBP = rbp;
	thread->SavedState.RIP = rip;
	// Pick the next thread; fall back to this CPU's idle thread.
	thread = Threads_GetNextToRun(CPU, thread);
		thread = gaCPUs[CPU].IdleThread;
		//Warning("Hmm... Threads_GetNextToRun returned NULL, I don't think this should happen.\n");
	#if DEBUG_TRACE_SWITCH
	LogF("Switching to task %i, CR3 = 0x%x, RIP = %p",
		thread->MemState.CR3,
		thread->SavedState.RIP
	LogF("CPU = %i", CPU);
	// Set current thread
	gaCPUs[CPU].Current = thread;
	// Update Kernel Stack pointer
	// NOTE(review): the -4 offset looks inherited from the 32-bit port;
	// x86_64 stacks are 8-byte granular — confirm intended value.
	gTSSs[CPU].RSP0 = thread->KernelStack-4;
	// Switch address space to the incoming thread's page tables.
	__asm__ __volatile__ ("mov %0, %%cr3"::"a"(thread->MemState.CR3));
	// Resume the thread; RAX carries SWITCH_MAGIC so its pending GetRIP()
	// reports that a switch occurred.
	__asm__ __volatile__ (
	"mov %1, %%rsp\n\t"	// Restore RSP
	"mov %2, %%rbp\n\t"	// and RBP
	"jmp *%3" : :	// And return to where we saved state (Proc_Clone or Proc_Scheduler)
	"a"(SWITCH_MAGIC), "b"(thread->SavedState.RSP),
	"d"(thread->SavedState.RBP), "c"(thread->SavedState.RIP)
	for(;;);	// Shouldn't reach here
815 EXPORT(Proc_SpawnWorker);