8 #include <threads_int.h>
15 #include <arch_config.h>
19 #define DEBUG_TRACE_SWITCH 0
20 //#define BREAK_ON_SWITCH 1 // Break into bochs debugger on a task switch
23 #define SWITCH_MAGIC 0x55ECAFFF##FFFACE55 // There is no code in this area
29 Uint8 State; // 0: Unavaliable, 1: Idle, 2: Active
37 extern void APStartup(void); // 16-bit AP startup code
38 extern Uint GetRIP(void); // start.asm
39 extern Uint64 gInitialPML4[512]; // start.asm
40 extern char gInitialKernelStack[];
41 extern tShortSpinlock glThreadListLock;
44 extern int giTotalTickets;
45 extern int giNumActiveThreads;
46 extern tThread gThreadZero;
47 extern void Threads_Dump(void);
48 extern void Proc_ReturnToUser(void);
49 extern void Time_UpdateTimestamp(void);
52 //void ArchThreads_Init(void);
54 void MP_StartAP(int CPU);
55 void MP_SendIPI(Uint8 APICID, int Vector, int DeliveryMode);
57 //void Proc_Start(void);
58 //tThread *Proc_GetCurThread(void);
59 void Proc_ChangeStack(void);
60 // int Proc_Clone(Uint *Err, Uint Flags);
61 // int Proc_SpawnWorker(void);
62 Uint Proc_MakeUserStack(void);
63 //void Proc_StartUser(Uint Entrypoint, Uint *Bases, int ArgC, char **ArgV, char **EnvP, int DataSize);
64 void Proc_StartProcess(Uint16 SS, Uint Stack, Uint Flags, Uint16 CS, Uint IP);
65 int Proc_Demote(Uint *Err, int Dest, tRegs *Regs);
66 //void Proc_CallFaultHandler(tThread *Thread);
67 //void Proc_DumpThreadCPUState(tThread *Thread);
68 void Proc_Scheduler(int CPU);
71 // --- Multiprocessing ---
// Number of APs still running their startup sequence; the BSP busy-waits on
// this reaching zero (see ArchThreads_Init / Proc_Start).
73 volatile int giNumInitingCPUs = 0;
// MP Floating Pointer structure found by the low-memory scan (NULL if absent).
74 tMPInfo *gMPFloatPtr = NULL;
// Memory-mapped Local APIC, mapped from the MP config table's LocalAPICMemMap.
75 tAPIC *gpMP_LocalAPIC = NULL;
// Maps an APIC ID (0-255) to the kernel CPU index used for gaCPUs[].
76 Uint8 gaAPIC_to_CPU[256] = {0};
// Per-CPU state (current thread, idle thread, APIC ID, ...).
78 tCPU gaCPUs[MAX_CPUS];
81 // --- Error Recovery ---
// Dedicated stack for double-fault handling — presumably wired to an IST/TSS
// entry elsewhere in the file; TODO confirm (not visible in this listing).
82 Uint32 gaDoubleFaultStack[1024];
86 * \fn void ArchThreads_Init(void)
87 * \brief Starts the process scheduler
/**
 * Architecture-specific threading initialisation: scans low memory for the
 * Intel MP Floating Pointer structure, parses and logs the MP configuration
 * table (starting any application processors found), then sets up per-CPU
 * TSS descriptors, loads the task register, programs the PIT tick and
 * allocates the per-process data block for thread zero.
 * NOTE(review): this listing is elided — braces, local declarations and
 * #if USE_MP guards are not all visible here.
 */
89 void ArchThreads_Init(void)
99 // -- Initialise Multiprocessing
100 // Find MP Floating Table
// Per the MP spec the structure is 16-byte aligned, identified by "_MP_"
// (MPPTR_IDENT) and validated by a zero byte-sum over sizeof(tMPInfo).
101 // - EBDA/Last 1Kib (640KiB)
102 for(pos = KERNEL_BASE|0x9F000; pos < (KERNEL_BASE|0xA0000); pos += 16) {
103 if( *(Uint*)(pos) == MPPTR_IDENT ) {
104 Log("Possible %p", pos);
105 if( ByteSum((void*)pos, sizeof(tMPInfo)) != 0 ) continue;
106 gMPFloatPtr = (void*)pos;
110 // - Last KiB (512KiB base mem)
112 for(pos = KERNEL_BASE|0x7F000; pos < (KERNEL_BASE|0x80000); pos += 16) {
113 if( *(Uint*)(pos) == MPPTR_IDENT ) {
114 Log("Possible %p", pos);
115 if( ByteSum((void*)pos, sizeof(tMPInfo)) != 0 ) continue;
116 gMPFloatPtr = (void*)pos;
// - BIOS ROM region (0xE0000 - 0xFFFFF)
123 for(pos = KERNEL_BASE|0xE0000; pos < (KERNEL_BASE|0x100000); pos += 16) {
124 if( *(Uint*)(pos) == MPPTR_IDENT ) {
125 Log("Possible %p", pos);
126 if( ByteSum((void*)pos, sizeof(tMPInfo)) != 0 ) continue;
127 gMPFloatPtr = (void*)pos;
133 // If the MP Table Exists, parse it
138 Log("gMPFloatPtr = %p", gMPFloatPtr);
139 Log("*gMPFloatPtr = {");
140 Log("\t.Sig = 0x%08x", gMPFloatPtr->Sig);
141 Log("\t.MPConfig = 0x%08x", gMPFloatPtr->MPConfig);
142 Log("\t.Length = 0x%02x", gMPFloatPtr->Length);
143 Log("\t.Version = 0x%02x", gMPFloatPtr->Version);
144 Log("\t.Checksum = 0x%02x", gMPFloatPtr->Checksum);
145 Log("\t.Features = [0x%02x,0x%02x,0x%02x,0x%02x,0x%02x]",
146 gMPFloatPtr->Features[0], gMPFloatPtr->Features[1],
147 gMPFloatPtr->Features[2], gMPFloatPtr->Features[3],
148 gMPFloatPtr->Features[4]
// MPConfig is a physical address below 1MiB, so the kernel's fixed
// KERNEL_BASE mapping of low memory can be used directly.
152 mptable = (void*)( KERNEL_BASE|gMPFloatPtr->MPConfig );
153 Log("mptable = %p", mptable);
155 Log("\t.Sig = 0x%08x", mptable->Sig);
156 Log("\t.BaseTableLength = 0x%04x", mptable->BaseTableLength);
157 Log("\t.SpecRev = 0x%02x", mptable->SpecRev);
158 Log("\t.Checksum = 0x%02x", mptable->Checksum);
159 Log("\t.OEMID = '%8c'", mptable->OemID);
160 Log("\t.ProductID = '%8c'", mptable->ProductID);
161 Log("\t.OEMTablePtr = %p'", mptable->OEMTablePtr);
162 Log("\t.OEMTableSize = 0x%04x", mptable->OEMTableSize);
163 Log("\t.EntryCount = 0x%04x", mptable->EntryCount);
164 Log("\t.LocalAPICMemMap = 0x%08x", mptable->LocalAPICMemMap);
165 Log("\t.ExtendedTableLen = 0x%04x", mptable->ExtendedTableLen);
166 Log("\t.ExtendedTableChecksum = 0x%02x", mptable->ExtendedTableChecksum);
// Map the Local APIC MMIO page into kernel address space.
169 gpMP_LocalAPIC = (void*)MM_MapHWPage(mptable->LocalAPICMemMap, 1);
171 ents = mptable->Entries;
// Walk the variable-length entry list; entSize is presumably set per entry
// type in the elided lines — TODO confirm against the full source.
174 for( i = 0; i < mptable->EntryCount; i ++ )
181 Log("%i: Processor", i);
182 Log("\t.APICID = %i", ents->Proc.APICID);
183 Log("\t.APICVer = 0x%02x", ents->Proc.APICVer);
184 Log("\t.CPUFlags = 0x%02x", ents->Proc.CPUFlags);
185 Log("\t.CPUSignature = 0x%08x", ents->Proc.CPUSignature);
186 Log("\t.FeatureFlags = 0x%08x", ents->Proc.FeatureFlags);
// CPUFlags bit 0 = "enabled"; skip processors the BIOS disabled.
189 if( !(ents->Proc.CPUFlags & 1) ) {
194 // Check if there is too many processors
195 if(giNumCPUs >= MAX_CPUS) {
196 giNumCPUs ++; // If `giNumCPUs` > MAX_CPUS later, it will be clipped
200 // Initialise CPU Info
201 gaAPIC_to_CPU[ents->Proc.APICID] = giNumCPUs;
202 gaCPUs[giNumCPUs].APICID = ents->Proc.APICID;
203 gaCPUs[giNumCPUs].State = 0;
// CPUFlags bit 1 = bootstrap processor; only APs need a startup kick.
207 if( !(ents->Proc.CPUFlags & 2) )
209 MP_StartAP( giNumCPUs-1 );
216 Log("\t.ID = %i", ents->Bus.ID);
217 Log("\t.TypeString = '%6c'", ents->Bus.TypeString);
221 Log("%i: I/O APIC", i);
222 Log("\t.ID = %i", ents->IOAPIC.ID);
223 Log("\t.Version = 0x%02x", ents->IOAPIC.Version);
224 Log("\t.Flags = 0x%02x", ents->IOAPIC.Flags);
225 Log("\t.Addr = 0x%08x", ents->IOAPIC.Addr);
227 case 3: // I/O Interrupt Assignment
229 Log("%i: I/O Interrupt Assignment", i);
230 Log("\t.IntType = %i", ents->IOInt.IntType);
231 Log("\t.Flags = 0x%04x", ents->IOInt.Flags);
232 Log("\t.SourceBusID = 0x%02x", ents->IOInt.SourceBusID);
233 Log("\t.SourceBusIRQ = 0x%02x", ents->IOInt.SourceBusIRQ);
234 Log("\t.DestAPICID = 0x%02x", ents->IOInt.DestAPICID);
235 Log("\t.DestAPICIRQ = 0x%02x", ents->IOInt.DestAPICIRQ);
237 case 4: // Local Interrupt Assignment
239 Log("%i: Local Interrupt Assignment", i);
240 Log("\t.IntType = %i", ents->LocalInt.IntType);
241 Log("\t.Flags = 0x%04x", ents->LocalInt.Flags);
242 Log("\t.SourceBusID = 0x%02x", ents->LocalInt.SourceBusID);
243 Log("\t.SourceBusIRQ = 0x%02x", ents->LocalInt.SourceBusIRQ);
244 Log("\t.DestLocalAPICID = 0x%02x", ents->LocalInt.DestLocalAPICID);
245 Log("\t.DestLocalAPICIRQ = 0x%02x", ents->LocalInt.DestLocalAPICIRQ);
248 Log("%i: Unknown (%i)", i, ents->Type);
// Advance to the next (variable-size) table entry.
251 ents = (void*)( (Uint)ents + entSize );
// Clip the over-count accumulated above so gaCPUs[] is never overrun.
254 if( giNumCPUs > MAX_CPUS ) {
255 Warning("Too many CPUs detected (%i), only using %i of them", giNumCPUs, MAX_CPUS);
256 giNumCPUs = MAX_CPUS;
// NOTE(review): calling MM_FinishVirtualInit() repeatedly inside this
// busy-wait looks suspicious — confirm intent against the full source.
259 while( giNumInitingCPUs )
260 MM_FinishVirtualInit();
262 Panic("Uh oh... MP Table Parsing is unimplemented\n");
265 Log("No MP Table was found, assuming uniprocessor\n");
272 MM_FinishVirtualInit();
276 // Initialise Normal TSS(s)
// On x86_64 a TSS descriptor occupies two 8-byte GDT slots, hence the
// 7+pos*2 indexing and the 0x38+pos*16 selector used for ltr below.
277 for(pos=0;pos<giNumCPUs;pos++)
282 gTSSs[pos].RSP0 = 0; // Set properly by scheduler
283 gGDT[7+pos*2].LimitLow = sizeof(tTSS) & 0xFFFF;
284 gGDT[7+pos*2].BaseLow = ((Uint)(&gTSSs[pos])) & 0xFFFF;
285 gGDT[7+pos*2].BaseMid = ((Uint)(&gTSSs[pos])) >> 16;
286 gGDT[7+pos*2].BaseHi = ((Uint)(&gTSSs[pos])) >> 24;
// Upper 32 bits of the 64-bit TSS base live in the second GDT slot.
287 gGDT[7+pos*2+1].DWord[0] = ((Uint)(&gTSSs[pos])) >> 32;
290 for(pos=0;pos<giNumCPUs;pos++) {
291 __asm__ __volatile__ ("ltr %%ax"::"a"(0x38+pos*16));
294 __asm__ __volatile__ ("ltr %%ax"::"a"(0x38));
297 // Set Debug registers
// DR0 is used to stash the current-thread pointer (fast lookup); DR1 is
// cleared.
298 __asm__ __volatile__ ("mov %0, %%db0" : : "r"(&gThreadZero));
299 __asm__ __volatile__ ("mov %%rax, %%db1" : : "a"(0));
301 gaCPUs[0].Current = &gThreadZero;
// CR3 wants the *physical* address of the initial PML4.
303 gThreadZero.MemState.CR3 = (Uint)gInitialPML4 - KERNEL_BASE;
304 gThreadZero.CurCPU = 0;
306 // Set timer frequency
307 outb(0x43, 0x34); // Set Channel 0, Low/High, Rate Generator
308 outb(0x40, PIT_TIMER_DIVISOR&0xFF); // Low Byte of Divisor
309 outb(0x40, (PIT_TIMER_DIVISOR>>8)&0xFF); // High Byte
311 // Create Per-Process Data Block
312 if( !MM_Allocate(MM_PPD_CFG) )
314 Warning("Oh, hell, Unable to allocate PPD for Thread#0");
320 Log("Multithreading initialised");
/**
 * Boot an application processor: point the BDA warm-reset vector at the
 * 16-bit AP trampoline (APStartup in start.asm) and send it an INIT IPI
 * (delivery mode 5).
 * NOTE(review): braces and any giNumInitingCPUs accounting are elided in
 * this listing.
 */
324 void MP_StartAP(int CPU)
326 Log("Starting AP %i (APIC %i)", CPU, gaCPUs[CPU].APICID);
327 // Set location of AP startup code and mark for a warm restart
// Warm-reset vector at 0x40:0x67 is offset:segment. With segment 0xFFFF
// (physical base 0xFFFF0), the offset is APStartup's distance from 0xFFFF0.
328 *(Uint16*)(KERNEL_BASE|0x467) = (Uint)&APStartup - (KERNEL_BASE|0xFFFF0);
329 *(Uint16*)(KERNEL_BASE|0x469) = 0xFFFF;
// CMOS shutdown-status byte (0x0F) = 0x0A: "jump via 0x40:0x67 without EOI".
330 outb(0x70, 0x0F); outb(0x71, 0x0A); // Warm Reset
331 MP_SendIPI(gaCPUs[CPU].APICID, 0, 5);
/**
 * Send an inter-processor interrupt via the Local APIC Interrupt Command
 * Register (ICR low at APIC+0x300, ICR high at APIC+0x310).
 * \param APICID       Destination APIC ID (bits 24-31 of ICR high)
 * \param Vector       Interrupt vector number
 * \param DeliveryMode 3-bit APIC delivery mode (e.g. 5 = INIT)
 * NOTE(review): `addr` is declared Uint32 but holds a kernel pointer — on
 * x86_64 this truncates if the APIC mapping lies above 4GiB; should be Uint.
 * (The declaration of `val` is elided from this listing.)
 */
335 void MP_SendIPI(Uint8 APICID, int Vector, int DeliveryMode)
337 Uint32 addr = (Uint)gpMP_LocalAPIC + 0x300;
// ICR high dword: destination APIC ID in the top byte.
341 val = (Uint)APICID << 24;
342 Log("*%p = 0x%08x", addr+0x10, val);
343 *(Uint32*)(addr+0x10) = val;
// ICR low dword: delivery mode | vector. Writing this dword fires the IPI.
345 val = ((DeliveryMode & 7) << 8) | (Vector & 0xFF);
346 Log("*%p = 0x%08x", addr, val);
347 *(Uint32*)addr = val;
352 * \fn void Proc_Start(void)
353 * \brief Start process scheduler
/**
 * Creates an idle thread per CPU, brings up the APs (MP build) or just CPU 0
 * (uniprocessor build), then enables interrupts so the PIT-driven scheduler
 * takes over.
 * NOTE(review): the two halves below are presumably alternate #if USE_MP
 * branches (guards elided). They call Proc_Clone with different arities
 * (Proc_Clone(0,0) vs Proc_Clone(0)) and use Threads_SetTickets vs
 * Threads_SetPriority — confirm against the full source.
 */
355 void Proc_Start(void)
363 for( i = 0; i < giNumCPUs; i ++ )
366 if(i) gaCPUs[i].Current = NULL;
// Clone returns 0 in the child; the child becomes this CPU's idle loop.
369 if( (tid = Proc_Clone(0, 0)) == 0)
371 for(;;) HALT(); // Just yields
373 gaCPUs[i].IdleThread = Threads_GetThread(tid);
374 gaCPUs[i].IdleThread->ThreadName = "Idle Thread";
375 Threads_SetTickets( gaCPUs[i].IdleThread, 0 ); // Never called randomly
376 gaCPUs[i].IdleThread->Quantum = 1; // 1 slice quantum
380 if( i != giProc_BootProcessorID ) {
385 // BSP still should run the current task
386 gaCPUs[0].Current = &gThreadZero;
388 // Start interrupts and wait for APs to come up
389 Log("Waiting for APs to come up\n");
390 __asm__ __volatile__ ("sti");
391 while( giNumInitingCPUs ) __asm__ __volatile__ ("hlt");
// Uniprocessor path: single idle thread on CPU 0.
394 if(Proc_Clone(0) == 0)
396 gaCPUs[0].IdleThread = Proc_GetCurThread();
397 gaCPUs[0].IdleThread->ThreadName = (char*)"Idle Thread";
398 Threads_SetPriority( gaCPUs[0].IdleThread, -1 ); // Never called randomly
399 gaCPUs[0].IdleThread->Quantum = 1; // 1 slice quantum
400 for(;;) HALT(); // Just yields
404 gaCPUs[0].Current = &gThreadZero;
405 gaCPUs[0].Current->CurCPU = 0;
407 // Start Interrupts (and hence scheduler)
408 __asm__ __volatile__("sti");
410 MM_FinishVirtualInit();
411 Log("Multithreading started");
415 * \fn tThread *Proc_GetCurThread(void)
416 * \brief Gets the current thread
/**
 * Returns the thread currently executing on the calling CPU.
 * NOTE(review): the two return statements are presumably the MP and
 * uniprocessor sides of an elided #if — confirm against the full source.
 */
418 tThread *Proc_GetCurThread(void)
421 return gaCPUs[ GetCPUNum() ].Current;
423 return gaCPUs[ 0 ].Current;
428 * \fn void Proc_ChangeStack(void)
429 * \brief Swaps the current stack for a new one (in the proper stack region)
/**
 * Allocates a new kernel stack, copies the in-use top portion of the boot
 * stack into it, patches any copied values that look like addresses into the
 * old stack (saved RBPs, stack-address arguments), then switches RSP/RBP to
 * the new stack.
 * NOTE(review): `old_rsp` is read in the repair loop but its assignment is
 * elided from this listing (presumably old_rsp = rsp before rebasing) —
 * confirm. The declarations of rsp/rbp are also elided.
 */
431 void Proc_ChangeStack(void)
434 Uint tmp_rbp, old_rsp;
435 Uint curBase, newBase;
437 __asm__ __volatile__ ("mov %%rsp, %0":"=r"(rsp));
438 __asm__ __volatile__ ("mov %%rbp, %0":"=r"(rbp));
443 newBase = MM_NewKStack();
446 Panic("What the?? Unable to allocate space for initial kernel stack");
450 curBase = (Uint)&gInitialKernelStack;
452 Log("curBase = 0x%x, newBase = 0x%x", curBase, newBase);
454 // Get ESP as a used size
// Stacks grow down: after the (elided) rebase, `rsp` is the byte count in
// use, so (base - rsp) is the lowest live address on each stack.
456 Log("memcpy( %p, %p, 0x%x )", (void*)(newBase - rsp), (void*)(curBase - rsp), rsp );
458 memcpy( (void*)(newBase - rsp), (void*)(curBase - rsp), rsp );
459 // Get ESP as an offset in the new stack
462 rbp = newBase - (curBase - rbp);
465 // Repair EBPs & Stack Addresses
466 // Catches arguments also, but may trash stack-address-like values
467 for(tmp_rbp = rsp; tmp_rbp < newBase; tmp_rbp += sizeof(Uint))
469 if(old_rsp < *(Uint*)tmp_rbp && *(Uint*)tmp_rbp < curBase)
470 *(Uint*)tmp_rbp += newBase - curBase;
473 Log("Applying Changes");
474 Proc_GetCurThread()->KernelStack = newBase;
475 __asm__ __volatile__ ("mov %0, %%rsp"::"r"(rsp));
476 __asm__ __volatile__ ("mov %0, %%rbp"::"r"(rbp));
480 * \fn int Proc_Clone(Uint Flags)
481 * \brief Clone the current process
/**
 * Forks the current thread. With CLONE_VM the address space is cloned and
 * the kernel stack shared; otherwise the kernel stack is copied to a fresh
 * one and embedded stack addresses relocated. Returns the child's TID in
 * the parent, 0 in the child (which resumes via the SWITCH_MAGIC check),
 * or -1 on failure.
 * NOTE(review): `rip` is read below but its capture (presumably GetRIP())
 * is elided from this listing, as are the child's return and the error
 * cleanup for a failed MM_NewKStack.
 */
483 int Proc_Clone(Uint Flags)
486 tThread *cur = Proc_GetCurThread();
489 __asm__ __volatile__ ("mov %%rsp, %0": "=r"(rsp));
490 __asm__ __volatile__ ("mov %%rbp, %0": "=r"(rbp));
492 newThread = Threads_CloneTCB(NULL, Flags);
493 if(!newThread) return -1;
495 Log("Proc_Clone: newThread = %p", newThread);
497 // Initialise Memory Space (New Addr space or kernel stack)
498 if(Flags & CLONE_VM) {
499 Log("Proc_Clone: Cloning VM");
500 newThread->MemState.CR3 = MM_Clone();
501 newThread->KernelStack = cur->KernelStack;
504 Uint tmp_rbp, old_rsp = rsp;
// Same address space: share CR3, but give the child its own kernel stack.
507 newThread->MemState.CR3 = cur->MemState.CR3;
510 newThread->KernelStack = MM_NewKStack();
511 Log("Proc_Clone: newKStack = %p", newThread->KernelStack);
513 if(newThread->KernelStack == 0) {
518 // Get ESP as a used size
519 rsp = cur->KernelStack - rsp;
522 (void*)(newThread->KernelStack - rsp),
523 (void*)(cur->KernelStack - rsp),
526 // Get ESP as an offset in the new stack
527 rsp = newThread->KernelStack - rsp;
529 rbp = newThread->KernelStack - (cur->KernelStack - rbp);
531 // Repair EBPs & Stack Addresses
532 // Catches arguments also, but may trash stack-address-like values
533 for(tmp_rbp = rsp; tmp_rbp < newThread->KernelStack; tmp_rbp += sizeof(Uint))
535 if(old_rsp < *(Uint*)tmp_rbp && *(Uint*)tmp_rbp < cur->KernelStack)
536 *(Uint*)tmp_rbp += newThread->KernelStack - cur->KernelStack;
540 // Save core machine state
541 newThread->SavedState.RSP = rsp;
542 newThread->SavedState.RBP = rbp;
// Child path: the scheduler jumps back here with SWITCH_MAGIC in RAX (see
// Proc_Scheduler); the child ACKs the PIT interrupt and returns 0 (elided).
544 if(rip == SWITCH_MAGIC) {
545 outb(0x20, 0x20); // ACK Timer and return as child
546 __asm__ __volatile__ ("sti");
552 newThread->SavedState.RIP = rip;
554 // Lock list and add to active
555 Threads_AddActive(newThread);
557 return newThread->TID;
561 * \fn int Proc_SpawnWorker(void)
562 * \brief Spawns a new worker thread
/**
 * Creates a kernel worker thread in PID 0's address space: copies
 * gThreadZero's TCB into a fresh allocation, gives it a new worker stack,
 * rebases the captured RSP/RBP into that stack and queues the thread as
 * active.
 * NOTE(review): the `rip` capture, both return paths and the actual copy of
 * the stack contents are elided here; the comment below says the stack "is
 * relocated by this code" — confirm where the copy happens in full source.
 */
564 int Proc_SpawnWorker(void)
569 cur = Proc_GetCurThread();
572 new = malloc( sizeof(tThread) );
574 Warning("Proc_SpawnWorker - Out of heap space!\n");
// Seed the new TCB from thread zero's template.
577 memcpy(new, &gThreadZero, sizeof(tThread));
579 new->TID = giNextTID++;
580 // Create a new worker stack (in PID0's address space)
581 // The stack is relocated by this code
582 new->KernelStack = MM_NewWorkerStack();
584 // Get ESP and EBP based in the new stack
585 __asm__ __volatile__ ("mov %%rsp, %0": "=r"(rsp));
586 __asm__ __volatile__ ("mov %%rbp, %0": "=r"(rbp));
// Same offset from the stack top, but in the worker stack.
587 rsp = new->KernelStack - (cur->KernelStack - rsp);
588 rbp = new->KernelStack - (cur->KernelStack - rbp);
590 // Save core machine state
591 new->SavedState.RSP = rsp;
592 new->SavedState.RBP = rbp;
// Child path: resumed by the scheduler with SWITCH_MAGIC (see Proc_Clone).
594 if(rip == SWITCH_MAGIC) {
595 outb(0x20, 0x20); // ACK Timer and return as child
596 __asm__ __volatile__ ("sti");
601 new->SavedState.RIP = rip;
603 new->Status = THREAD_STAT_PREINIT;
604 Threads_AddActive( new );
610 * \fn Uint Proc_MakeUserStack(void)
611 * \brief Creates a new user stack
/**
 * Allocates USER_STACK_SZ bytes of user stack ending at USER_STACK_TOP and
 * returns the top address, or 0 if any page in the region is already mapped
 * or allocation fails.
 * NOTE(review): the rollback loop around the MM_Deallocate below is partly
 * elided — presumably it frees pages 0..i-1 on failure; confirm.
 */
613 Uint Proc_MakeUserStack(void)
616 Uint base = USER_STACK_TOP - USER_STACK_SZ;
618 // Check Prospective Space
// Scan every page top-down; i only reaches -1 if none are mapped.
619 for( i = USER_STACK_SZ >> 12; i--; )
620 if( MM_GetPhysAddr( base + (i<<12) ) != 0 )
623 if(i != -1) return 0;
625 // Allocate Stack - Allocate incrementally to clean up MM_Dump output
626 for( i = 0; i < USER_STACK_SZ/0x1000; i++ )
628 if( !MM_Allocate( base + (i<<12) ) )
631 Log_Error("Proc", "Unable to allocate user stack (%i pages requested)", USER_STACK_SZ/0x1000);
633 MM_Deallocate( base + (i<<12) );
// base + USER_STACK_SZ == USER_STACK_TOP (stack grows down from here).
638 return base + USER_STACK_SZ;
643 * \fn void Proc_StartUser(Uint Entrypoint, Uint *Bases, int ArgC, char **ArgV, char **EnvP, int DataSize)
644 * \brief Starts a user task
/**
 * Builds the initial user stack: copies the DataSize-byte argv/envp block to
 * the top of a fresh user stack, shifts every pointer in the argv/envp
 * arrays by the copy delta, pushes envp/argv/argc plus a NULL return
 * address, then drops to ring 3 via Proc_StartProcess with RFLAGS 0x202
 * (IF | reserved bit 1).
 * NOTE(review): the rebase of EnvP itself (EnvP = ...) is elided above its
 * fixup loop — confirm against the full source.
 */
646 void Proc_StartUser(Uint Entrypoint, Uint *Bases, int ArgC, char **ArgV, char **EnvP, int DataSize)
648 Uint *stack = (void*)Proc_MakeUserStack();
653 LOG("stack = 0x%x", stack);
// Reserve room at the stack top and copy the packed argv/envp data there.
656 stack = (void*)( (Uint)stack - DataSize );
657 memcpy( stack, ArgV, DataSize );
659 // Adjust Arguments and environment
// The block kept its internal layout, so every contained pointer just moves
// by the distance the block was copied.
660 delta = (Uint)stack - (Uint)ArgV;
661 ArgV = (char**)stack;
662 for( i = 0; ArgV[i]; i++ ) ArgV[i] += delta;
665 for( i = 0; EnvP[i]; i++ ) EnvP[i] += delta;
667 // User Mode Segments
// 0x23 = user data selector (RPL 3), 0x1B = user code selector (RPL 3).
668 ss = 0x23; cs = 0x1B;
671 *--stack = (Uint)EnvP;
672 *--stack = (Uint)ArgV;
673 *--stack = (Uint)ArgC;
676 *--stack = 0; // Return Address
678 Proc_StartProcess(ss, (Uint)stack, 0x202, cs, Entrypoint);
/**
 * Builds an IRET frame (SS, RSP, RFLAGS, CS, and — in an elided line — RIP)
 * plus recognisable dummy general-register values on the given stack, then
 * loads that stack and irets into the target context.
 * NOTE(review): register comments still use 32-bit names and the restore
 * order depends on the (elided) interrupt-return path; also `iret` would
 * normally be `iretq` on x86_64 — confirm against the full source.
 */
681 void Proc_StartProcess(Uint16 SS, Uint Stack, Uint Flags, Uint16 CS, Uint IP)
683 Uint *stack = (void*)Stack;
684 *--stack = SS; //Stack Segment
// The new context's stack pointer is the original top, so it sees the
// values Proc_StartUser pushed just above this frame.
685 *--stack = Stack; //Stack Pointer
686 *--stack = Flags; //RFLAGS (caller passes 0x202 = reserved bit 0x2 | IF 0x200)
687 *--stack = CS; //Code Segment
// Dummy fill values make uninitialised registers obvious in a debugger.
690 *--stack = 0xAAAAAAAA; // eax
691 *--stack = 0xCCCCCCCC; // ecx
692 *--stack = 0xDDDDDDDD; // edx
693 *--stack = 0xBBBBBBBB; // ebx
694 *--stack = 0xD1D1D1D1; // edi
695 *--stack = 0x54545454; // rsp - not popped
696 *--stack = 0x51515151; // esi
697 *--stack = 0xB4B4B4B4; // rbp
// Point RSP at the constructed frame and return through it.
701 __asm__ __volatile__ (
702 "mov %%rax,%%rsp;\n\t" // Set stack pointer
703 "iret;\n\t" : : "a" (stack));
708 * \fn int Proc_Demote(Uint *Err, int Dest, tRegs *Regs)
709 * \brief Demotes a process to a lower permission level
710 * \param Err Pointer to user's errno
711 * \param Dest New Permission Level
712 * \param Regs Pointer to user's register structure
714 int Proc_Demote(Uint *Err, int Dest, tRegs *Regs)
// Current privilege level = low two bits of the saved CS selector.
716 int cpl = Regs->CS & 3;
// Only rings 0-3 exist; reject anything else (error path elided).
718 if(Dest > 3 || Dest < 0) {
729 // Change the Segment Registers
// Selector for ring `Dest`, with Dest as the RPL; the code segment sits one
// 8-byte descriptor below the matching data/stack segment (CS = SS - 8).
730 Regs->CS = (((Dest+1)<<4) | Dest) - 8;
731 Regs->SS = ((Dest+1)<<4) | Dest;
737 * \brief Calls a signal handler in user mode
738 * \note Used for signals
/**
 * Transfers to the thread's registered fault handler: loads the handler
 * address into RBP and calls the Proc_ReturnToUser assembly stub (start.asm),
 * which performs the actual drop to user mode.
 */
740 void Proc_CallFaultHandler(tThread *Thread)
742 // Rewinds the stack and calls the user function
744 __asm__ __volatile__ ("mov %0, %%rbp;\n\tcall Proc_ReturnToUser" :: "r"(Thread->FaultHandler));
/**
 * Logs the user-mode CS:RIP that Proc_Scheduler captured for this thread.
 */
748 void Proc_DumpThreadCPUState(tThread *Thread)
750 Log(" At %04x:%016llx", Thread->SavedState.UserCS, Thread->SavedState.UserRIP);
754 * \fn void Proc_Scheduler(int CPU)
755 * \brief Swap current thread and clears dead threads
/**
 * Timer-driven scheduler entry point. Decrements the running thread's
 * quantum; when it expires, saves RSP/RBP/RIP, snapshots the user-mode
 * CS:RIP from the interrupt frame, picks the next runnable thread (falling
 * back to this CPU's idle thread), updates the TSS ring-0 stack pointer and
 * switches by restoring the target's saved registers with SWITCH_MAGIC in
 * RAX.
 * NOTE(review): elided lines include the rip capture (GetRIP) and the CR3
 * reload implied by the unused-looking last asm input operand — confirm.
 */
757 void Proc_Scheduler(int CPU)
763 Time_UpdateTimestamp();
765 // If the spinlock is set, let it complete
766 if(IS_LOCKED(&glThreadListLock)) return;
768 // Get current thread
769 thread = gaCPUs[CPU].Current;
774 // Reduce remaining quantum and continue timeslice if non-zero
775 if(thread->Remaining--) return;
776 // Reset quantum for next call
777 thread->Remaining = thread->Quantum;
780 __asm__ __volatile__ ("mov %%rsp, %0":"=r"(rsp));
781 __asm__ __volatile__ ("mov %%rbp, %0":"=r"(rbp));
// We arrive here a second time when another context jmps back with RAX =
// SWITCH_MAGIC (see the asm below); in that case the switch already happened.
783 if(rip == SWITCH_MAGIC) return; // Check if a switch happened
785 // Save machine state
786 thread->SavedState.RSP = rsp;
787 thread->SavedState.RBP = rbp;
788 thread->SavedState.RIP = rip;
790 // TODO: Make this more stable somehow
// Reach up the stack past RBP, the return address and the CurThread slot to
// the interrupt register frame, to capture user CS:RIP for debugging.
791 regs = (tRegs*)(rbp+(2+1)*8); // RBP,Ret + CurThread
792 thread->SavedState.UserCS = regs->CS;
793 thread->SavedState.UserRIP = regs->RIP;
798 tThread *oldthread = thread;
802 thread = Threads_GetNextToRun(CPU, thread);
// Nothing runnable: fall back to this CPU's idle thread.
806 thread = gaCPUs[CPU].IdleThread;
807 //Warning("Hmm... Threads_GetNextToRun returned NULL, I don't think this should happen.\n");
811 if(thread == NULL ) {
815 if( thread != oldthread ) {
821 #if DEBUG_TRACE_SWITCH
822 LogF("Switching to task %i, CR3 = 0x%x, RIP = %p",
824 thread->MemState.CR3,
825 thread->SavedState.RIP
831 LogF("CPU = %i", CPU);
832 // Set current thread
833 gaCPUs[CPU].Current = thread;
835 // Update Kernel Stack pointer
// NOTE(review): the "-4" adjustment looks like a leftover from the 32-bit
// port; on x86_64 an 8- or 16-byte aligned offset would be expected — confirm.
836 gTSSs[CPU].RSP0 = thread->KernelStack-4;
// Resume the chosen thread. RAX carries SWITCH_MAGIC so the resumed code
// (here, Proc_Clone or Proc_SpawnWorker) can detect it was switched to.
839 __asm__ __volatile__ (
841 "mov %1, %%rsp\n\t" // Restore RSP
842 "mov %2, %%rbp\n\t" // and RBP
843 "jmp *%3" : : // And return to where we saved state (Proc_Clone or Proc_Scheduler)
844 "a"(SWITCH_MAGIC), "r"(thread->SavedState.RSP),
845 "r"(thread->SavedState.RBP), "r"(thread->SavedState.RIP),
846 "r"(thread->MemState.CR3)
848 for(;;); // Shouldn't reach here
852 EXPORT(Proc_SpawnWorker);