2 * AcessOS Microkernel Version
// === CONSTANTS ===
#define DEBUG_TRACE_SWITCH	0	// Set to 1 to log every context switch
#define SWITCH_MAGIC	0xFFFACE55	// There is no code in this area - sentinel "EIP" marking a completed switch
#define TIMER_DIVISOR	11931	//~100Hz (PIT base 1193182Hz / 11931)

// === IMPORTS ===
extern void	APStartup();	// 16-bit AP startup code
extern Uint	GetEIP();	// start.asm - returns the caller's EIP
extern Uint32	gaInitPageDir[1024];	// start.asm - boot-time page directory
extern void	Kernel_Stack_Top;	// Linker symbol: top of the initial kernel stack
extern volatile int	giThreadListLock;	// Spinlock guarding the thread lists
extern int	giTotalTickets;
extern int	giNumActiveThreads;
extern tThread	gThreadZero;	// Statically allocated TCB for the boot thread
extern tThread	*gActiveThreads;
extern tThread	*gSleepingThreads;
extern tThread	*gDeleteThreads;	// Threads queued for reaping by the scheduler
extern tThread	*Threads_GetNextToRun(int CPU);
extern void	Threads_Dump();
extern tThread	*Threads_CloneTCB(Uint *Err, Uint Flags);
extern void	Isr8();	// Double Fault ISR entry point

// === PROTOTYPES ===
void	ArchThreads_Init();
void	MP_StartAP(int CPU);
void	MP_SendIPI(Uint8 APICID, int Vector, int DeliveryMode);
tThread	*Proc_GetCurThread();
void	Proc_ChangeStack();
 int	Proc_Clone(Uint *Err, Uint Flags);
void	Proc_StartProcess(Uint16 SS, Uint Stack, Uint Flags, Uint16 CS, Uint IP);
void	Proc_Scheduler();
57 // --- Multiprocessing ---
59 volatile int giNumInitingCPUs = 0;
60 tMPInfo *gMPFloatPtr = NULL;
61 tAPIC *gpMP_LocalAPIC = NULL;
62 Uint8 gaAPIC_to_CPU[256] = {0};
63 tCPU gaCPUs[MAX_CPUS];
65 tThread *gCurrentThread = NULL;
68 Uint32 *gPML4s[4] = NULL;
// --- Error Recovery ---
// Dedicated stack and hardware task used by the double-fault task gate,
// so a #DF can be handled even when the normal kernel stack is trashed.
char	gaDoubleFaultStack[1024];
tTSS	gDoubleFault_TSS = {
	// NOTE(review): stack top is &gaDoubleFaultStack[1023] (last byte) rather
	// than &gaDoubleFaultStack[1024] (one past the end) — leaves ESP unaligned;
	// confirm this is intentional
	.ESP0 = (Uint)&gaDoubleFaultStack[1023],
	.CR3 = (Uint)gaInitPageDir - KERNEL_BASE,	// Physical address of boot page directory
	.ESP = (Uint)&gaDoubleFaultStack[1023],
	.CS = 0x08, .SS = 0x10,	// Kernel code/data selectors
	.DS = 0x10, .ES = 0x10,
	.FS = 0x10, .GS = 0x10,
/**
 * \fn void ArchThreads_Init()
 * \brief Starts the process scheduler
 */
void ArchThreads_Init()
	// -- Initialise Multiprocessing
	// Find MP Floating Table
	// Each candidate region is scanned on 16-byte boundaries for the MP
	// signature, then validated with a zero 8-bit byte-sum checksum.
	// - EBDA/Last 1Kib (640KiB)
	for(pos = KERNEL_BASE|0x9F000; pos < (KERNEL_BASE|0xA0000); pos += 16) {
		if( *(Uint*)(pos) == MPPTR_IDENT ) {
			Log("Possible %p", pos);
			if( ByteSum((void*)pos, sizeof(tMPInfo)) != 0 )	continue;	// Checksum failed
			gMPFloatPtr = (void*)pos;
	// - Last KiB (512KiB base mem)
	for(pos = KERNEL_BASE|0x7F000; pos < (KERNEL_BASE|0x80000); pos += 16) {
		if( *(Uint*)(pos) == MPPTR_IDENT ) {
			Log("Possible %p", pos);
			if( ByteSum((void*)pos, sizeof(tMPInfo)) != 0 )	continue;	// Checksum failed
			gMPFloatPtr = (void*)pos;
	// - BIOS ROM area (0xE0000 - 0xFFFFF)
	for(pos = KERNEL_BASE|0xE0000; pos < (KERNEL_BASE|0x100000); pos += 16) {
		if( *(Uint*)(pos) == MPPTR_IDENT ) {
			Log("Possible %p", pos);
			if( ByteSum((void*)pos, sizeof(tMPInfo)) != 0 )	continue;	// Checksum failed
			gMPFloatPtr = (void*)pos;
	// If the MP Table Exists, parse it
		// Dump the MP Floating Pointer structure for debugging
		Log("gMPFloatPtr = %p", gMPFloatPtr);
		Log("*gMPFloatPtr = {");
		Log("\t.Sig = 0x%08x", gMPFloatPtr->Sig);
		Log("\t.MPConfig = 0x%08x", gMPFloatPtr->MPConfig);
		Log("\t.Length = 0x%02x", gMPFloatPtr->Length);
		Log("\t.Version = 0x%02x", gMPFloatPtr->Version);
		Log("\t.Checksum = 0x%02x", gMPFloatPtr->Checksum);
		Log("\t.Features = [0x%02x,0x%02x,0x%02x,0x%02x,0x%02x]",
			gMPFloatPtr->Features[0], gMPFloatPtr->Features[1],
			gMPFloatPtr->Features[2], gMPFloatPtr->Features[3],
			gMPFloatPtr->Features[4]
		// MPConfig is a physical address; adjust it into the kernel's
		// higher-half mapping of low memory
		mptable = (void*)( KERNEL_BASE|gMPFloatPtr->MPConfig );
		Log("mptable = %p", mptable);
		// Dump the MP Configuration Table header
		Log("\t.Sig = 0x%08x", mptable->Sig);
		Log("\t.BaseTableLength = 0x%04x", mptable->BaseTableLength);
		Log("\t.SpecRev = 0x%02x", mptable->SpecRev);
		Log("\t.Checksum = 0x%02x", mptable->Checksum);
		Log("\t.OEMID = '%8c'", mptable->OemID);
		Log("\t.ProductID = '%8c'", mptable->ProductID);
		Log("\t.OEMTablePtr = %p'", mptable->OEMTablePtr);
		Log("\t.OEMTableSize = 0x%04x", mptable->OEMTableSize);
		Log("\t.EntryCount = 0x%04x", mptable->EntryCount);
		Log("\t.LocalAPICMemMap = 0x%08x", mptable->LocalAPICMemMap);
		Log("\t.ExtendedTableLen = 0x%04x", mptable->ExtendedTableLen);
		Log("\t.ExtendedTableChecksum = 0x%02x", mptable->ExtendedTableChecksum);
		// Map the Local APIC's register page into kernel address space
		gpMP_LocalAPIC = (void*)MM_MapHWPage(mptable->LocalAPICMemMap, 1);
		// Walk the variable-length entry list that follows the header
		ents = mptable->Entries;
		for( i = 0; i < mptable->EntryCount; i ++ )
				// Processor entry
				Log("%i: Processor", i);
				Log("\t.APICID = %i", ents->Proc.APICID);
				Log("\t.APICVer = 0x%02x", ents->Proc.APICVer);
				Log("\t.CPUFlags = 0x%02x", ents->Proc.CPUFlags);
				Log("\t.CPUSignature = 0x%08x", ents->Proc.CPUSignature);
				Log("\t.FeatureFlags = 0x%08x", ents->Proc.FeatureFlags);
				// CPUFlags bit 0 clear => CPU disabled, skip it
				if( !(ents->Proc.CPUFlags & 1) ) {
				// Check if there is too many processors
				if(giNumCPUs >= MAX_CPUS) {
					giNumCPUs ++;	// If `giNumCPUs` > MAX_CPUS later, it will be clipped
				// Initialise CPU Info
				gaAPIC_to_CPU[ents->Proc.APICID] = giNumCPUs;
				gaCPUs[giNumCPUs].APICID = ents->Proc.APICID;
				gaCPUs[giNumCPUs].State = 0;
				// CPUFlags bit 1 clear => not the bootstrap processor,
				// so it is an Application Processor we must start
				if( !(ents->Proc.CPUFlags & 2) )
				MP_StartAP( giNumCPUs-1 );
				// Bus entry
				Log("\t.ID = %i", ents->Bus.ID);
				Log("\t.TypeString = '%6c'", ents->Bus.TypeString);
				// I/O APIC entry
				Log("%i: I/O APIC", i);
				Log("\t.ID = %i", ents->IOAPIC.ID);
				Log("\t.Version = 0x%02x", ents->IOAPIC.Version);
				Log("\t.Flags = 0x%02x", ents->IOAPIC.Flags);
				Log("\t.Addr = 0x%08x", ents->IOAPIC.Addr);
			case 3:	// I/O Interrupt Assignment
				Log("%i: I/O Interrupt Assignment", i);
				Log("\t.IntType = %i", ents->IOInt.IntType);
				Log("\t.Flags = 0x%04x", ents->IOInt.Flags);
				Log("\t.SourceBusID = 0x%02x", ents->IOInt.SourceBusID);
				Log("\t.SourceBusIRQ = 0x%02x", ents->IOInt.SourceBusIRQ);
				Log("\t.DestAPICID = 0x%02x", ents->IOInt.DestAPICID);
				Log("\t.DestAPICIRQ = 0x%02x", ents->IOInt.DestAPICIRQ);
			case 4:	// Local Interrupt Assignment
				Log("%i: Local Interrupt Assignment", i);
				Log("\t.IntType = %i", ents->LocalInt.IntType);
				Log("\t.Flags = 0x%04x", ents->LocalInt.Flags);
				Log("\t.SourceBusID = 0x%02x", ents->LocalInt.SourceBusID);
				Log("\t.SourceBusIRQ = 0x%02x", ents->LocalInt.SourceBusIRQ);
				Log("\t.DestLocalAPICID = 0x%02x", ents->LocalInt.DestLocalAPICID);
				Log("\t.DestLocalAPICIRQ = 0x%02x", ents->LocalInt.DestLocalAPICIRQ);
				// Unrecognised entry type
				Log("%i: Unknown (%i)", i, ents->Type);
			// Advance to the next (variable-sized) table entry
			ents = (void*)( (Uint)ents + entSize );
		// Clip the CPU count if more were found than we can manage
		if( giNumCPUs > MAX_CPUS ) {
			Warning("Too many CPUs detected (%i), only using %i of them", giNumCPUs, MAX_CPUS);
			giNumCPUs = MAX_CPUS;
		// Wait for all APs to finish their startup code
		while( giNumInitingCPUs )
			MM_FinishVirtualInit();
		// MP support is incomplete; bail loudly rather than misbehave
		Panic("Uh oh... MP Table Parsing is unimplemented\n");
		// No MP table found: fall back to a single CPU
		Log("No MP Table was found, assuming uniprocessor\n");
	// Tidy up boot-time virtual memory mappings
	MM_FinishVirtualInit();
	// Initialise Double Fault TSS
	// GDT[5] describes gDoubleFault_TSS (Access 0x89 = present 32-bit TSS)
	gGDT[5].LimitLow = sizeof(tTSS);
	gGDT[5].Access = 0x89;	// Type
	gGDT[5].BaseLow = (Uint)&gDoubleFault_TSS & 0xFFFF;
	gGDT[5].BaseMid = (Uint)&gDoubleFault_TSS >> 16;
	gGDT[5].BaseHi = (Uint)&gDoubleFault_TSS >> 24;
	Log_Debug("Proc", "gIDT[8] = {OffsetLo:%04x, CS:%04x, Flags:%04x, OffsetHi:%04x}",
		gIDT[8].OffsetLo, gIDT[8].CS, gIDT[8].Flags, gIDT[8].OffsetHi);
	// Make IDT entry 8 (#DF) a task gate (Flags 0x8500 = present, DPL0,
	// task gate) so a double fault switches to the dedicated TSS above
	gIDT[8].OffsetLo = 0;
	gIDT[8].Flags = 0x8500;
	gIDT[8].OffsetHi = 0;
	Log_Debug("Proc", "gIDT[8] = {OffsetLo:%04x, CS:%04x, Flags:%04x, OffsetHi:%04x}",
		gIDT[8].OffsetLo, gIDT[8].CS, gIDT[8].Flags, gIDT[8].OffsetHi);
	//__asm__ __volatile__ ("xchg %bx, %bx");	// Bochs magic breakpoint (disabled)
	// Initialise Normal TSS(s)
	// One TSS per CPU, installed in GDT slots 6..6+giNumCPUs-1
	for(pos=0;pos<giNumCPUs;pos++)
		gTSSs[pos].SS0 = 0x10;	// Ring-0 stack segment (kernel data)
		gTSSs[pos].ESP0 = 0;	// Set properly by scheduler
		gGDT[6+pos].BaseLow = ((Uint)(&gTSSs[pos])) & 0xFFFF;
		gGDT[6+pos].BaseMid = ((Uint)(&gTSSs[pos])) >> 16;
		gGDT[6+pos].BaseHi = ((Uint)(&gTSSs[pos])) >> 24;
	// Load the task register (GDT slot 6+pos => selector 0x30 + pos*8)
	// NOTE(review): executed on the boot CPU; presumably each AP loads its
	// own TR during startup — confirm
	for(pos=0;pos<giNumCPUs;pos++) {
		__asm__ __volatile__ ("ltr %%ax"::"a"(0x30+pos*8));
	// Thread zero is the currently executing boot context
	gaCPUs[0].Current = &gThreadZero;
	gCurrentThread = &gThreadZero;
	// Point thread zero's memory state at the boot page directory
	gThreadZero.MemState.PDP[0] = 0;
	gThreadZero.MemState.PDP[1] = 0;
	gThreadZero.MemState.PDP[2] = 0;
	gThreadZero.MemState.CR3 = (Uint)gaInitPageDir - KERNEL_BASE;
	// Set timer frequency
	outb(0x43, 0x34);	// Set Channel 0, Low/High, Rate Generator
	outb(0x40, TIMER_DIVISOR&0xFF);	// Low Byte of Divisor
	outb(0x40, (TIMER_DIVISOR>>8)&0xFF);	// High Byte
	// Create Per-Process Data Block
	MM_Allocate(MM_PPD_CFG);
/**
 * \brief Starts an Application Processor
 * \param CPU	Index into gaCPUs of the processor to start
 */
void MP_StartAP(int CPU)
	Log("Starting AP %i (APIC %i)", CPU, gaCPUs[CPU].APICID);
	// Set location of AP startup code and mark for a warm restart
	// 40:67h is the BIOS warm-reset jump vector (offset word, then segment word)
	*(Uint16*)(KERNEL_BASE|0x467) = (Uint)&APStartup - (KERNEL_BASE|0xFFFF0);
	*(Uint16*)(KERNEL_BASE|0x469) = 0xFFFF;
	// CMOS shutdown-status byte (0x0F) = 0x0A: jump via 40:67h after reset
	outb(0x70, 0x0F);	outb(0x71, 0x0A);	// Warm Reset
	// Kick the AP with an INIT IPI (delivery mode 5, vector ignored)
	MP_SendIPI(gaCPUs[CPU].APICID, 0, 5);
/**
 * \brief Sends an Inter-Processor Interrupt via the Local APIC
 * \param APICID	Destination Local APIC ID
 * \param Vector	Interrupt vector to deliver
 * \param DeliveryMode	APIC delivery mode (e.g. 5 = INIT)
 */
void MP_SendIPI(Uint8 APICID, int Vector, int DeliveryMode)
	// The Interrupt Command Register (ICR) lives at offset 0x300 in the
	// Local APIC's MMIO page
	Uint32	addr = (Uint)gpMP_LocalAPIC + 0x300;
	// Write the high doubleword first: destination APIC ID in bits 24-31
	val = (Uint)APICID << 24;
	Log("*%p = 0x%08x", addr+0x10, val);
	*(Uint32*)(addr+0x10) = val;
	// Writing the low doubleword (delivery mode + vector) sends the IPI
	val = ((DeliveryMode & 7) << 8) | (Vector & 0xFF);
	Log("*%p = 0x%08x", addr, val);
	*(Uint32*)addr = val;
/**
 * \fn void Proc_Start()
 * \brief Start process scheduler
 */
	// Start Interrupts (and hence scheduler) - the timer IRQ drives
	// Proc_Scheduler from here on
	__asm__ __volatile__("sti");
/**
 * \fn tThread *Proc_GetCurThread()
 * \brief Gets the current thread
 */
tThread *Proc_GetCurThread()
	// MP build: identify this CPU via its Local APIC ID
	return gaCPUs[ gaAPIC_to_CPU[gpMP_LocalAPIC->ID.Val&0xFF] ].Current;
	// Uniprocessor build: single global suffices
	return gCurrentThread;
/**
 * \fn void Proc_ChangeStack()
 * \brief Swaps the current stack for a new one (in the proper stack region)
 */
void Proc_ChangeStack()
	Uint	curBase, newBase;
	// Capture the live stack registers
	__asm__ __volatile__ ("mov %%esp, %0":"=r"(esp));
	__asm__ __volatile__ ("mov %%ebp, %0":"=r"(ebp));
	// Allocate the replacement kernel stack
	newBase = MM_NewKStack();
		Panic("What the?? Unable to allocate space for initial kernel stack");
	// The boot stack's top comes from a linker symbol
	curBase = (Uint)&Kernel_Stack_Top;
	LOG("curBase = 0x%x, newBase = 0x%x", curBase, newBase);
	// Get ESP as a used size (bytes in use below the stack top)
	LOG("memcpy( %p, %p, 0x%x )", (void*)(newBase - esp), (void*)(curBase - esp), esp );
	// Copy the in-use portion of the old stack to the new one
	memcpy( (void*)(newBase - esp), (void*)(curBase - esp), esp );
	// Get ESP as an offset in the new stack
	ebp = newBase - (curBase - ebp);
	// Repair EBPs & Stack Addresses
	// Catches arguments also, but may trash stack-address-like values
	for(tmpEbp = esp; tmpEbp < newBase; tmpEbp += 4)
		if(oldEsp < *(Uint*)tmpEbp && *(Uint*)tmpEbp < curBase)
			*(Uint*)tmpEbp += newBase - curBase;
	// Record the new stack in the current thread's TCB
	Proc_GetCurThread()->KernelStack = newBase;
	// Switch the live registers onto the new stack
	__asm__ __volatile__ ("mov %0, %%esp"::"r"(esp));
	__asm__ __volatile__ ("mov %0, %%ebp"::"r"(ebp));
/**
 * \fn int Proc_Clone(Uint *Err, Uint Flags)
 * \brief Clone the current process
 * \param Err	Output: error code on failure
 * \param Flags	Clone behaviour flags (CLONE_VM etc.)
 * \return New thread's TID in the parent; the child resumes via the
 *         SWITCH_MAGIC path below
 */
int Proc_Clone(Uint *Err, Uint Flags)
	tThread	*cur = Proc_GetCurThread();
	// Capture the live stack registers for the clone's saved state
	__asm__ __volatile__ ("mov %%esp, %0": "=r"(esp));
	__asm__ __volatile__ ("mov %%ebp, %0": "=r"(ebp));
	newThread = Threads_CloneTCB(Err, Flags);
	if(!newThread)	return -1;
	// Initialise Memory Space (New Addr space or kernel stack)
	// NOTE(review): here CLONE_VM selects a *new* address space (MM_Clone)
	// while sharing the kernel stack — inverted relative to POSIX CLONE_VM
	// naming; confirm against the rest of the codebase
	if(Flags & CLONE_VM) {
		newThread->MemState.CR3 = MM_Clone();
		newThread->KernelStack = cur->KernelStack;
		Uint	tmpEbp, oldEsp = esp;
		// Same address space; give the clone its own kernel stack
		newThread->MemState.CR3 = cur->MemState.CR3;
		newThread->KernelStack = MM_NewKStack();
		if(newThread->KernelStack == 0) {
		// Get ESP as a used size
		esp = cur->KernelStack - esp;
		// Copy the in-use portion of the parent's kernel stack
		memcpy( (void*)(newThread->KernelStack - esp), (void*)(cur->KernelStack - esp), esp );
		// Get ESP as an offset in the new stack
		esp = newThread->KernelStack - esp;
		// Rebase EBP into the new stack by preserving its offset
		ebp = newThread->KernelStack - (cur->KernelStack - ebp);
		// Repair EBPs & Stack Addresses
		// Catches arguments also, but may trash stack-address-like values
		for(tmpEbp = esp; tmpEbp < newThread->KernelStack; tmpEbp += 4)
			if(oldEsp < *(Uint*)tmpEbp && *(Uint*)tmpEbp < cur->KernelStack)
				*(Uint*)tmpEbp += newThread->KernelStack - cur->KernelStack;
	// Save core machine state
	newThread->SavedState.ESP = esp;
	newThread->SavedState.EBP = ebp;
	// GetEIP() (call not shown in this chunk) yields SWITCH_MAGIC when we
	// are resumed *as the child* by the scheduler's jmp
	if(eip == SWITCH_MAGIC) {
		outb(0x20, 0x20);	// ACK Timer and return as child
	newThread->SavedState.EIP = eip;
	// Lock list and add to active
	Threads_AddActive(newThread);
	return newThread->TID;
/**
 * \fn int Proc_SpawnWorker()
 * \brief Spawns a new worker thread
 */
int Proc_SpawnWorker()
	cur = Proc_GetCurThread();
	// Allocate a TCB and seed it from thread zero (the kernel template)
	new = malloc( sizeof(tThread) );
		// Allocation failure path
		Warning("Proc_SpawnWorker - Out of heap space!\n");
	memcpy(new, &gThreadZero, sizeof(tThread));
	new->TID = giNextTID++;	// Assign a fresh thread ID
	// Create a new worker stack (in PID0's address space)
	// The stack is relocated by this code
	new->KernelStack = MM_NewWorkerStack();
	// Get ESP and EBP based in the new stack
	__asm__ __volatile__ ("mov %%esp, %0": "=r"(esp));
	__asm__ __volatile__ ("mov %%ebp, %0": "=r"(ebp));
	// Preserve each register's offset from the stack top while rebasing
	esp = new->KernelStack - (cur->KernelStack - esp);
	ebp = new->KernelStack - (cur->KernelStack - ebp);
	// Save core machine state
	new->SavedState.ESP = esp;
	new->SavedState.EBP = ebp;
	// GetEIP() (call not shown in this chunk) yields SWITCH_MAGIC when we
	// are resumed as the worker thread
	if(eip == SWITCH_MAGIC) {
		outb(0x20, 0x20);	// ACK Timer and return as child
	new->SavedState.EIP = eip;
	// Mark runnable and queue for scheduling
	new->Status = THREAD_STAT_ACTIVE;
	Threads_AddActive( new );
566 * \fn Uint Proc_MakeUserStack()
567 * \brief Creates a new user stack
569 Uint Proc_MakeUserStack()
572 Uint base = USER_STACK_TOP - USER_STACK_SZ;
574 // Check Prospective Space
575 for( i = USER_STACK_SZ >> 12; i--; )
576 if( MM_GetPhysAddr( base + (i<<12) ) != 0 )
579 if(i != -1) return 0;
581 // Allocate Stack - Allocate incrementally to clean up MM_Dump output
582 for( i = 0; i < USER_STACK_SZ/4069; i++ )
583 MM_Allocate( base + (i<<12) );
585 return base + USER_STACK_SZ;
/**
 * \fn void Proc_StartUser(Uint Entrypoint, Uint *Bases, int ArgC, char **ArgV, char **EnvP, int DataSize)
 * \brief Starts a user task
 * \param Entrypoint	Usermode entry address
 * \param Bases	Module base addresses (usage not visible in this chunk)
 * \param ArgC	Argument count
 * \param ArgV	Argument array (start of a DataSize-byte block also holding EnvP data)
 * \param EnvP	Environment array
 * \param DataSize	Total size of the argument/environment data block
 */
void Proc_StartUser(Uint Entrypoint, Uint *Bases, int ArgC, char **ArgV, char **EnvP, int DataSize)
	Uint	*stack = (void*)Proc_MakeUserStack();
	LOG("stack = 0x%x", stack);
	// Copy the argument/environment block onto the top of the new stack
	stack = (void*)( (Uint)stack - DataSize );
	memcpy( stack, ArgV, DataSize );
	// Adjust Arguments and environment
	// The embedded pointers still reference the old copy; shift them by
	// the relocation delta
	delta = (Uint)stack - (Uint)ArgV;
	ArgV = (char**)stack;
	for( i = 0; ArgV[i]; i++ )	ArgV[i] += delta;
	for( i = 0; EnvP[i]; i++ )	EnvP[i] += delta;
	// User Mode Segments (ring-3 data and code selectors)
	ss = 0x23;	cs = 0x1B;
	// Push entry arguments, right to left (cdecl)
	*--stack = (Uint)EnvP;
	*--stack = (Uint)ArgV;
	*--stack = (Uint)ArgC;
	*--stack = 0;	// Return Address
	// EFLAGS 0x202 = reserved bit 1 + IF set
	Proc_StartProcess(ss, (Uint)stack, 0x202, cs, Entrypoint);
/**
 * \brief Performs the jump to a (usually lower-privilege) context via IRET
 * \param SS	Stack segment selector to load
 * \param Stack	Stack pointer for the new context
 * \param Flags	EFLAGS value to load
 * \param CS	Code segment selector to load
 * \param IP	Instruction pointer to jump to
 */
void Proc_StartProcess(Uint16 SS, Uint Stack, Uint Flags, Uint16 CS, Uint IP)
	Uint	*stack = (void*)Stack;
	// Build an IRET frame for an inter-privilege return
	*--stack = SS;		//Stack Segment
	*--stack = Stack;	//Stack Pointer
	*--stack = Flags;	//EFLAGS (Resvd (0x2) and IF (0x20))
	*--stack = CS;		//Code Segment
	// Recognisable garbage in the GP registers aids post-mortem debugging
	*--stack = 0xAAAAAAAA;	// eax
	*--stack = 0xCCCCCCCC;	// ecx
	*--stack = 0xDDDDDDDD;	// edx
	*--stack = 0xBBBBBBBB;	// ebx
	*--stack = 0xD1D1D1D1;	// edi
	*--stack = 0x54545454;	// esp - NOT POPED
	*--stack = 0x51515151;	// esi
	*--stack = 0xB4B4B4B4;	// ebp
	// Point ESP at the constructed frame and pop everything with IRET;
	// this does not return
	__asm__ __volatile__ (
	"mov %%eax,%%esp;\n\t"	// Set stack pointer
	"iret;\n\t" : : "a" (stack));
/**
 * \fn int Proc_Demote(Uint *Err, int Dest, tRegs *Regs)
 * \brief Demotes a process to a lower permission level
 * \param Err	Pointer to user's errno
 * \param Dest	New Permission Level
 * \param Regs	Pointer to user's register structure
 */
int Proc_Demote(Uint *Err, int Dest, tRegs *Regs)
	 int	cpl = Regs->cs & 3;	// Current privilege level (RPL of saved CS)
	// Sanity check the destination ring
	if(Dest > 3 || Dest < 0) {
	// Change the Segment Registers
	// NOTE(review): selector arithmetic assumes a GDT layout where the
	// ring-N data selector is ((N+1)<<4)|N with the matching code selector
	// 8 bytes below it — confirm against the GDT definition
	Regs->cs = (((Dest+1)<<4) | Dest) - 8;
	Regs->ss = ((Dest+1)<<4) | Dest;
	// Check if the GP Segs are GDT, then change them
	// (selector bit 2 set means LDT; those are left untouched)
	if(!(Regs->ds & 4))	Regs->ds = ((Dest+1)<<4) | Dest;
	if(!(Regs->es & 4))	Regs->es = ((Dest+1)<<4) | Dest;
	if(!(Regs->fs & 4))	Regs->fs = ((Dest+1)<<4) | Dest;
	if(!(Regs->gs & 4))	Regs->gs = ((Dest+1)<<4) | Dest;
/**
 * \fn void Proc_Scheduler(int CPU)
 * \brief Swap current thread and clears dead threads
 * \param CPU	Index of the CPU invoking the scheduler (timer IRQ context)
 */
void Proc_Scheduler(int CPU)
	// If the spinlock is set, let it complete
	if(giThreadListLock)	return;
	// Clear Delete Queue
	while(gDeleteThreads)
		thread = gDeleteThreads->Next;
		if(gDeleteThreads->IsLocked) {	// Only free if structure is unused
			gDeleteThreads->Status = THREAD_STAT_NULL;
			free( gDeleteThreads );
		gDeleteThreads = thread;
	// Check if there is any tasks running
	if(giNumActiveThreads == 0) {
		Log("No Active threads, sleeping");
		__asm__ __volatile__ ("hlt");	// Idle until the next interrupt
	// Get current thread
	thread = gaCPUs[CPU].Current;	// MP build
	thread = gCurrentThread;	// UP build
	// Reduce remaining quantum and continue timeslice if non-zero
	if(thread->Remaining--)	return;
	// Reset quantum for next call
	thread->Remaining = thread->Quantum;
	// Capture current ESP/EBP so this thread can be resumed right here
	__asm__ __volatile__ ("mov %%esp, %0":"=r"(esp));
	__asm__ __volatile__ ("mov %%ebp, %0":"=r"(ebp));
	// GetEIP() (call not shown in this chunk) returns SWITCH_MAGIC when
	// control arrives via the jmp at the bottom of a previous switch
	if(eip == SWITCH_MAGIC)	return;	// Check if a switch happened
	// Save machine state
	thread->SavedState.ESP = esp;
	thread->SavedState.EBP = ebp;
	thread->SavedState.EIP = eip;
	// Pick the next thread to run
	thread = Threads_GetNextToRun(CPU);
	// A NULL here means the scheduler state is inconsistent
	Warning("Hmm... Threads_GetNextToRun returned NULL, I don't think this should happen.\n");
	#if DEBUG_TRACE_SWITCH
	Log("Switching to task %i, CR3 = 0x%x, EIP = %p",
		thread->MemState.CR3,
		thread->SavedState.EIP
	// Set current thread
	gaCPUs[CPU].Current = thread;	// MP build
	gCurrentThread = thread;	// UP build
	// Update Kernel Stack pointer
	gTSSs[CPU].ESP0 = thread->KernelStack-4;
	# error "Todo: Implement PAE Address space switching"
	// Switch to the incoming thread's address space
	__asm__ __volatile__ ("mov %0, %%cr3"::"a"(thread->MemState.CR3));
	// Restore ESP/EBP and jump to the saved EIP; EAX carries SWITCH_MAGIC
	// so the resumed GetEIP() call appears to return the sentinel value
	__asm__ __volatile__ (
		"mov %1, %%esp\n\t"	// Restore ESP
		"mov %2, %%ebp\n\t"	// and EBP
		"jmp *%3" : :	// And return to where we saved state (Proc_Clone or Proc_Scheduler)
		"a"(SWITCH_MAGIC), "b"(thread->SavedState.ESP),
		"d"(thread->SavedState.EBP), "c"(thread->SavedState.EIP)
	for(;;);	// Shouldn't reach here
// Expose worker-thread creation to loadable kernel modules
EXPORT(Proc_SpawnWorker);