* AcessOS Microkernel Version
* proc.c
*/
-#include <common.h>
+#include <acess.h>
#include <proc.h>
#include <mm_virt.h>
#include <errno.h>
# include <mp.h>
#endif
+// === FLAGS ===
+#define DEBUG_TRACE_SWITCH 0
+
// === CONSTANTS ===
#define SWITCH_MAGIC 0xFFFACE55 // There is no code in this area
#define TIMER_DIVISOR 11931 //~100Hz
// === IMPORTS ===
extern tGDT gGDT[];
+extern void APStartup(); // 16-bit AP startup code
extern Uint GetEIP(); // start.asm
extern Uint32 gaInitPageDir[1024]; // start.asm
extern void Kernel_Stack_Top;
extern tThread *gDeleteThreads;
extern tThread *Threads_GetNextToRun(int CPU);
extern void Threads_Dump();
+extern tThread *Threads_CloneTCB(Uint *Err, Uint Flags);
+extern void Isr7();
// === PROTOTYPES ===
void ArchThreads_Init();
+#if USE_MP
+void MP_StartAP(int CPU);
+void MP_SendIPI(Uint8 APICID, int Vector, int DeliveryMode);
+#endif
+void Proc_Start();
tThread *Proc_GetCurThread();
void Proc_ChangeStack();
int Proc_Clone(Uint *Err, Uint Flags);
void Proc_Scheduler();
// === GLOBALS ===
-// --- Current State ---
+// --- Multiprocessing ---
#if USE_MP
-tThread *gCurrentThread[MAX_CPUS] = {NULL};
+volatile int giNumInitingCPUs = 0;
+tMPInfo *gMPFloatPtr = NULL;
+tAPIC *gpMP_LocalAPIC = NULL;
+Uint8 gaAPIC_to_CPU[256] = {0};
+tCPU gaCPUs[MAX_CPUS];
#else
tThread *gCurrentThread = NULL;
#endif
-// --- Multiprocessing ---
-#if USE_MP
-tMPInfo *gMPTable = NULL;
-#endif
#if USE_PAE
Uint32 *gPML4s[4] = NULL;
#endif
tTSS *gTSSs = NULL;
-#if !USE_MP
tTSS gTSS0 = {0};
-#endif
+// --- Error Recovery ---
+char gaDoubleFaultStack[1024];
+tTSS gDoubleFault_TSS = {
+ .ESP0 = (Uint)&gaDoubleFaultStack[1023],
+ .SS0 = 0x10,
+ .EIP = (Uint)Isr7
+};
// === CODE ===
/**
void ArchThreads_Init()
{
Uint pos = 0;
+
#if USE_MP
+ tMPTable *mptable;
+
+ // Mark BSP as active
+ gaCPUs[0].State = 2;
+
// -- Initialise Multiprocessing
// Find MP Floating Table
- // - EBDA
- for(pos = KERNEL_BASE|0x9FC00; pos < (KERNEL_BASE|0xA0000); pos += 16) {
- if( *(Uint*)(pos) == MPTABLE_IDENT ) {
- if(ByteSum( (void*)pos, sizeof(tMPInfo) ) != 0) continue;
- gMPTable = (void*)pos;
+	// - EBDA / last 4KiB below 640KiB (0x9F000-0x9FFFF)
+ for(pos = KERNEL_BASE|0x9F000; pos < (KERNEL_BASE|0xA0000); pos += 16) {
+ if( *(Uint*)(pos) == MPPTR_IDENT ) {
+ Log("Possible %p", pos);
+ if( ByteSum((void*)pos, sizeof(tMPInfo)) != 0 ) continue;
+ gMPFloatPtr = (void*)pos;
break;
}
}
- // - Last KiB
- if(!gMPTable) {
-
+	// - Last 4KiB of base memory on 512KiB systems (0x7F000-0x7FFFF)
+ if(!gMPFloatPtr) {
+ for(pos = KERNEL_BASE|0x7F000; pos < (KERNEL_BASE|0x80000); pos += 16) {
+ if( *(Uint*)(pos) == MPPTR_IDENT ) {
+ Log("Possible %p", pos);
+ if( ByteSum((void*)pos, sizeof(tMPInfo)) != 0 ) continue;
+ gMPFloatPtr = (void*)pos;
+ break;
+ }
+ }
}
// - BIOS ROM
- if(!gMPTable) {
- for(pos = KERNEL_BASE|0xF0000; pos < (KERNEL_BASE|0x100000); pos += 16) {
- if( *(Uint*)(pos) == MPTABLE_IDENT ) {
- if(ByteSum( (void*)pos, sizeof(tMPInfo) ) != 0) continue;
- gMPTable = (void*)pos;
+ if(!gMPFloatPtr) {
+ for(pos = KERNEL_BASE|0xE0000; pos < (KERNEL_BASE|0x100000); pos += 16) {
+ if( *(Uint*)(pos) == MPPTR_IDENT ) {
+ Log("Possible %p", pos);
+ if( ByteSum((void*)pos, sizeof(tMPInfo)) != 0 ) continue;
+ gMPFloatPtr = (void*)pos;
break;
}
}
}
// If the MP Table Exists, parse it
- if(gMPTable)
+ if(gMPFloatPtr)
{
+ int i;
+ tMPTable_Ent *ents;
+ Log("gMPFloatPtr = %p", gMPFloatPtr);
+ Log("*gMPFloatPtr = {");
+ Log("\t.Sig = 0x%08x", gMPFloatPtr->Sig);
+ Log("\t.MPConfig = 0x%08x", gMPFloatPtr->MPConfig);
+ Log("\t.Length = 0x%02x", gMPFloatPtr->Length);
+ Log("\t.Version = 0x%02x", gMPFloatPtr->Version);
+ Log("\t.Checksum = 0x%02x", gMPFloatPtr->Checksum);
+ Log("\t.Features = [0x%02x,0x%02x,0x%02x,0x%02x,0x%02x]",
+ gMPFloatPtr->Features[0], gMPFloatPtr->Features[1],
+ gMPFloatPtr->Features[2], gMPFloatPtr->Features[3],
+ gMPFloatPtr->Features[4]
+ );
+ Log("}");
+
+ mptable = (void*)( KERNEL_BASE|gMPFloatPtr->MPConfig );
+ Log("mptable = %p", mptable);
+ Log("*mptable = {");
+ Log("\t.Sig = 0x%08x", mptable->Sig);
+ Log("\t.BaseTableLength = 0x%04x", mptable->BaseTableLength);
+ Log("\t.SpecRev = 0x%02x", mptable->SpecRev);
+ Log("\t.Checksum = 0x%02x", mptable->Checksum);
+ Log("\t.OEMID = '%8c'", mptable->OemID);
+ Log("\t.ProductID = '%8c'", mptable->ProductID);
+ Log("\t.OEMTablePtr = %p'", mptable->OEMTablePtr);
+ Log("\t.OEMTableSize = 0x%04x", mptable->OEMTableSize);
+ Log("\t.EntryCount = 0x%04x", mptable->EntryCount);
+ Log("\t.LocalAPICMemMap = 0x%08x", mptable->LocalAPICMemMap);
+ Log("\t.ExtendedTableLen = 0x%04x", mptable->ExtendedTableLen);
+ Log("\t.ExtendedTableChecksum = 0x%02x", mptable->ExtendedTableChecksum);
+ Log("}");
+
+ gpMP_LocalAPIC = (void*)MM_MapHWPage(mptable->LocalAPICMemMap, 1);
+
+ ents = mptable->Entries;
+ giNumCPUs = 0;
+
+ for( i = 0; i < mptable->EntryCount; i ++ )
+ {
+ int entSize = 0;
+ switch( ents->Type )
+ {
+ case 0: // Processor
+ entSize = 20;
+ Log("%i: Processor", i);
+ Log("\t.APICID = %i", ents->Proc.APICID);
+ Log("\t.APICVer = 0x%02x", ents->Proc.APICVer);
+ Log("\t.CPUFlags = 0x%02x", ents->Proc.CPUFlags);
+ Log("\t.CPUSignature = 0x%08x", ents->Proc.CPUSignature);
+ Log("\t.FeatureFlags = 0x%08x", ents->Proc.FeatureFlags);
+
+
+ if( !(ents->Proc.CPUFlags & 1) ) {
+ Log("DISABLED");
+ break;
+ }
+
+ // Check if there is too many processors
+ if(giNumCPUs >= MAX_CPUS) {
+ giNumCPUs ++; // If `giNumCPUs` > MAX_CPUS later, it will be clipped
+ break;
+ }
+
+ // Initialise CPU Info
+ gaAPIC_to_CPU[ents->Proc.APICID] = giNumCPUs;
+ gaCPUs[giNumCPUs].APICID = ents->Proc.APICID;
+ gaCPUs[giNumCPUs].State = 0;
+ giNumCPUs ++;
+
+ // Send IPI
+ if( !(ents->Proc.CPUFlags & 2) )
+ {
+ MP_StartAP( giNumCPUs-1 );
+ }
+
+ break;
+ case 1: // Bus
+ entSize = 8;
+ Log("%i: Bus", i);
+ Log("\t.ID = %i", ents->Bus.ID);
+ Log("\t.TypeString = '%6c'", ents->Bus.TypeString);
+ break;
+ case 2: // I/O APIC
+ entSize = 8;
+ Log("%i: I/O APIC", i);
+ Log("\t.ID = %i", ents->IOAPIC.ID);
+ Log("\t.Version = 0x%02x", ents->IOAPIC.Version);
+ Log("\t.Flags = 0x%02x", ents->IOAPIC.Flags);
+ Log("\t.Addr = 0x%08x", ents->IOAPIC.Addr);
+ break;
+ case 3: // I/O Interrupt Assignment
+ entSize = 8;
+ Log("%i: I/O Interrupt Assignment", i);
+ Log("\t.IntType = %i", ents->IOInt.IntType);
+ Log("\t.Flags = 0x%04x", ents->IOInt.Flags);
+ Log("\t.SourceBusID = 0x%02x", ents->IOInt.SourceBusID);
+ Log("\t.SourceBusIRQ = 0x%02x", ents->IOInt.SourceBusIRQ);
+ Log("\t.DestAPICID = 0x%02x", ents->IOInt.DestAPICID);
+ Log("\t.DestAPICIRQ = 0x%02x", ents->IOInt.DestAPICIRQ);
+ break;
+ case 4: // Local Interrupt Assignment
+ entSize = 8;
+ Log("%i: Local Interrupt Assignment", i);
+ Log("\t.IntType = %i", ents->LocalInt.IntType);
+ Log("\t.Flags = 0x%04x", ents->LocalInt.Flags);
+ Log("\t.SourceBusID = 0x%02x", ents->LocalInt.SourceBusID);
+ Log("\t.SourceBusIRQ = 0x%02x", ents->LocalInt.SourceBusIRQ);
+ Log("\t.DestLocalAPICID = 0x%02x", ents->LocalInt.DestLocalAPICID);
+ Log("\t.DestLocalAPICIRQ = 0x%02x", ents->LocalInt.DestLocalAPICIRQ);
+ break;
+ default:
+ Log("%i: Unknown (%i)", i, ents->Type);
+ break;
+ }
+ ents = (void*)( (Uint)ents + entSize );
+ }
+
+ if( giNumCPUs > MAX_CPUS ) {
+ Warning("Too many CPUs detected (%i), only using %i of them", giNumCPUs, MAX_CPUS);
+ giNumCPUs = MAX_CPUS;
+ }
+
+ while( giNumInitingCPUs )
+ MM_FinishVirtualInit();
+
Panic("Uh oh... MP Table Parsing is unimplemented\n");
- } else {
- #endif
+ }
+ else {
+ Log("No MP Table was found, assuming uniprocessor\n");
giNumCPUs = 1;
gTSSs = &gTSS0;
- #if USE_MP
}
+ #else
+ giNumCPUs = 1;
+ gTSSs = &gTSS0;
+ MM_FinishVirtualInit();
+ #endif
- // Initialise TSS
+ // Initialise Double Fault TSS
+ /*
+ gGDT[5].LimitLow = sizeof(tTSS);
+ gGDT[5].LimitHi = 0;
+ gGDT[5].Access = 0x89; // Type
+ gGDT[5].Flags = 0x4;
+ */
+ gGDT[5].BaseLow = (Uint)&gDoubleFault_TSS & 0xFFFF;
+ gGDT[5].BaseMid = (Uint)&gDoubleFault_TSS >> 16;
+ gGDT[5].BaseHi = (Uint)&gDoubleFault_TSS >> 24;
+
+ #if USE_MP
+ // Initialise Normal TSS(s)
for(pos=0;pos<giNumCPUs;pos++)
{
#else
#endif
gTSSs[pos].SS0 = 0x10;
gTSSs[pos].ESP0 = 0; // Set properly by scheduler
- gGDT[5+pos].LimitLow = sizeof(tTSS);
- gGDT[5+pos].LimitHi = 0;
- gGDT[5+pos].Access = 0x89; // Type
- gGDT[5+pos].Flags = 0x4;
- gGDT[5+pos].BaseLow = (Uint)&gTSSs[pos] & 0xFFFF;
- gGDT[5+pos].BaseMid = (Uint)&gTSSs[pos] >> 16;
- gGDT[5+pos].BaseHi = (Uint)&gTSSs[pos] >> 24;
+ gGDT[6+pos].BaseLow = ((Uint)(&gTSSs[pos])) & 0xFFFF;
+ gGDT[6+pos].BaseMid = ((Uint)(&gTSSs[pos])) >> 16;
+ gGDT[6+pos].BaseHi = ((Uint)(&gTSSs[pos])) >> 24;
#if USE_MP
}
for(pos=0;pos<giNumCPUs;pos++) {
#endif
- __asm__ __volatile__ ("ltr %%ax"::"a"(0x28+pos*8));
+ __asm__ __volatile__ ("ltr %%ax"::"a"(0x30+pos*8));
#if USE_MP
}
#endif
#if USE_MP
- gCurrentThread[0] = &gThreadZero;
+ gaCPUs[0].Current = &gThreadZero;
#else
gCurrentThread = &gThreadZero;
#endif
Proc_ChangeStack();
}
+#if USE_MP
+/**
+ * \brief Start an Application Processor
+ * \param CPU	Index into gaCPUs of the processor to boot
+ *
+ * Points the BIOS warm-reset vector (40:67) at the 16-bit AP startup
+ * stub, sets the CMOS shutdown code to "warm reset", then sends an IPI
+ * (delivery mode 5 - INIT, per the APIC spec) to the target's APIC ID.
+ * giNumInitingCPUs is incremented so the BSP can later wait for the AP
+ * to finish initialising (see the wait loop in ArchThreads_Init).
+ */
+void MP_StartAP(int CPU)
+{
+	Log("Starting AP %i (APIC %i)", CPU, gaCPUs[CPU].APICID);
+	// Set location of AP startup code and mark for a warm restart
+	*(Uint16*)(KERNEL_BASE|0x467) = (Uint)&APStartup - (KERNEL_BASE|0xFFFF0);
+	*(Uint16*)(KERNEL_BASE|0x469) = 0xFFFF;
+	outb(0x70, 0x0F);	outb(0x71, 0x0A);	// Warm Reset
+	MP_SendIPI(gaCPUs[CPU].APICID, 0, 5);
+	giNumInitingCPUs ++;
+}
+
+/**
+ * \brief Send an Inter-Processor Interrupt via the local APIC
+ * \param APICID	Destination local APIC ID
+ * \param Vector	Interrupt vector to deliver (low 8 bits of ICR)
+ * \param DeliveryMode	ICR delivery mode (3 bits, e.g. 5 = INIT)
+ *
+ * Writes the Interrupt Command Register of this CPU's local APIC
+ * (ICR low dword at base+0x300, high dword at base+0x310). The write
+ * to the LOW dword is what actually triggers the IPI, so the high
+ * (destination) dword must be written first - do not reorder.
+ */
+void MP_SendIPI(Uint8 APICID, int Vector, int DeliveryMode)
+{
+	Uint32	addr = (Uint)gpMP_LocalAPIC + 0x300;
+	Uint32	val;
+	
+	// Hi (destination APIC ID in bits 24-31)
+	val = (Uint)APICID << 24;
+	Log("*%p = 0x%08x", addr+0x10, val);
+	*(Uint32*)(addr+0x10) = val;
+	// Low (and send)
+	val = ((DeliveryMode & 7) << 8) | (Vector & 0xFF);
+	Log("*%p = 0x%08x", addr, val);
+	*(Uint32*)addr = val;
+}
+#endif
+
/**
* \fn void Proc_Start()
* \brief Start process scheduler
tThread *Proc_GetCurThread()
{
#if USE_MP
- return NULL;
+ return gaCPUs[ gaAPIC_to_CPU[gpMP_LocalAPIC->ID.Val&0xFF] ].Current;
#else
return gCurrentThread;
#endif
*(Uint*)tmpEbp += newBase - curBase;
}
- gCurrentThread->KernelStack = newBase;
+ Proc_GetCurThread()->KernelStack = newBase;
__asm__ __volatile__ ("mov %0, %%esp"::"r"(esp));
__asm__ __volatile__ ("mov %0, %%ebp"::"r"(ebp));
int Proc_Clone(Uint *Err, Uint Flags)
{
tThread *newThread;
+ tThread *cur = Proc_GetCurThread();
Uint eip, esp, ebp;
__asm__ __volatile__ ("mov %%esp, %0": "=r"(esp));
__asm__ __volatile__ ("mov %%ebp, %0": "=r"(ebp));
- // Create new thread structure
- newThread = malloc( sizeof(tThread) );
- if(!newThread) {
- Warning("Proc_Clone - Out of memory when creating thread\n");
- *Err = -ENOMEM;
- return -1;
- }
- // Base new thread on old
- memcpy(newThread, gCurrentThread, sizeof(tThread));
+ newThread = Threads_CloneTCB(Err, Flags);
+ if(!newThread) return -1;
+
// Initialise Memory Space (New Addr space or kernel stack)
if(Flags & CLONE_VM) {
- newThread->TGID = newThread->TID;
newThread->MemState.CR3 = MM_Clone();
+ newThread->KernelStack = cur->KernelStack;
} else {
Uint tmpEbp, oldEsp = esp;
+ // Set CR3
+ newThread->MemState.CR3 = cur->MemState.CR3;
+
// Create new KStack
newThread->KernelStack = MM_NewKStack();
// Check for errors
}
// Get ESP as a used size
- esp = gCurrentThread->KernelStack - esp;
+ esp = cur->KernelStack - esp;
// Copy used stack
- memcpy( (void*)(newThread->KernelStack - esp), (void*)(gCurrentThread->KernelStack - esp), esp );
+ memcpy( (void*)(newThread->KernelStack - esp), (void*)(cur->KernelStack - esp), esp );
// Get ESP as an offset in the new stack
esp = newThread->KernelStack - esp;
// Adjust EBP
- ebp = newThread->KernelStack - (gCurrentThread->KernelStack - ebp);
+ ebp = newThread->KernelStack - (cur->KernelStack - ebp);
// Repair EBPs & Stack Addresses
// Catches arguments also, but may trash stack-address-like values
for(tmpEbp = esp; tmpEbp < newThread->KernelStack; tmpEbp += 4)
{
- if(oldEsp < *(Uint*)tmpEbp && *(Uint*)tmpEbp < gCurrentThread->KernelStack)
- *(Uint*)tmpEbp += newThread->KernelStack - gCurrentThread->KernelStack;
+ if(oldEsp < *(Uint*)tmpEbp && *(Uint*)tmpEbp < cur->KernelStack)
+ *(Uint*)tmpEbp += newThread->KernelStack - cur->KernelStack;
}
}
-
- // Set Pointer, Spinlock and TID
- newThread->Next = NULL;
- newThread->IsLocked = 0;
- newThread->TID = giNextTID++;
- newThread->PTID = gCurrentThread->TID;
-
- // Clear message list (messages are not inherited)
- newThread->Messages = NULL;
- newThread->LastMessage = NULL;
-
- // Set remaining (sheduler expects remaining to be correct)
- newThread->Remaining = newThread->Quantum;
// Save core machine state
newThread->SavedState.ESP = esp;
newThread->SavedState.EIP = eip;
// Lock list and add to active
- LOCK( &giThreadListLock );
- newThread->Next = gActiveThreads;
- gActiveThreads = newThread;
- giNumActiveThreads ++;
- giTotalTickets += newThread->NumTickets;
- RELEASE( &giThreadListLock );
-
- Threads_Dump();
+ Threads_AddActive(newThread);
return newThread->TID;
}
+/**
+ * \fn int Proc_SpawnWorker()
+ * \brief Spawns a new worker thread
+ * \return New thread's TID in the parent, 0 in the new worker thread,
+ *         or -1 on allocation failure
+ *
+ * The worker's TCB is copied from gThreadZero (not the current thread)
+ * and given a fresh kernel stack via MM_NewWorkerStack(). Like
+ * Proc_Clone, this uses the GetEIP()/SWITCH_MAGIC trick so the function
+ * "returns twice": once in the caller (with the new TID) and once when
+ * the scheduler first runs the worker (returning 0).
+ */
+int Proc_SpawnWorker()
+{
+	tThread	*new, *cur;
+	Uint	eip, esp, ebp;
+	
+	cur = Proc_GetCurThread();
+	
+	// Create new thread (based on the kernel idle thread, not `cur`)
+	new = malloc( sizeof(tThread) );
+	if(!new) {
+		Warning("Proc_SpawnWorker - Out of heap space!\n");
+		return -1;
+	}
+	memcpy(new, &gThreadZero, sizeof(tThread));
+	// Set Thread ID
+	new->TID = giNextTID++;
+	// Create a new worker stack (in PID0's address space)
+	// The stack is relocated by this code
+	new->KernelStack = MM_NewWorkerStack();
+
+	// Get ESP and EBP based in the new stack
+	// (same offset from the stack top as in the current stack)
+	__asm__ __volatile__ ("mov %%esp, %0": "=r"(esp));
+	__asm__ __volatile__ ("mov %%ebp, %0": "=r"(ebp));
+	esp = new->KernelStack - (cur->KernelStack - esp);
+	ebp = new->KernelStack - (cur->KernelStack - ebp);
+
+	// Save core machine state
+	new->SavedState.ESP = esp;
+	new->SavedState.EBP = ebp;
+	// GetEIP() returns the real EIP on the first pass, and
+	// SWITCH_MAGIC when the scheduler jumps back here as the worker
+	eip = GetEIP();
+	if(eip == SWITCH_MAGIC) {
+		outb(0x20, 0x20);	// ACK Timer and return as child
+		return 0;
+	}
+
+	// Set EIP as parent
+	new->SavedState.EIP = eip;
+	// Mark as active
+	new->Status = THREAD_STAT_ACTIVE;
+	Threads_AddActive( new );
+	
+	return new->TID;
+}
+
/**
* \fn Uint Proc_MakeUserStack()
* \brief Creates a new user stack
/**
- * \fn void Proc_StartUser(Uint Entrypoint, Uint Base, int ArgC, char **ArgV, char **EnvP, int DataSize)
+ * \fn void Proc_StartUser(Uint Entrypoint, Uint *Bases, int ArgC, char **ArgV, char **EnvP, int DataSize)
* \brief Starts a user task
*/
void Proc_StartUser(Uint Entrypoint, Uint *Bases, int ArgC, char **ArgV, char **EnvP, int DataSize)
* \fn int Proc_Demote(Uint *Err, int Dest, tRegs *Regs)
* \brief Demotes a process to a lower permission level
* \param Err Pointer to user's errno
+ * \param Dest New Permission Level
+ * \param Regs Pointer to user's register structure
*/
int Proc_Demote(Uint *Err, int Dest, tRegs *Regs)
{
return;
}
+ // Get current thread
+ #if USE_MP
+ thread = gaCPUs[CPU].Current;
+ #else
+ thread = gCurrentThread;
+ #endif
+
// Reduce remaining quantum and continue timeslice if non-zero
- if(gCurrentThread->Remaining--) return;
+ if(thread->Remaining--) return;
// Reset quantum for next call
- gCurrentThread->Remaining = gCurrentThread->Quantum;
+ thread->Remaining = thread->Quantum;
// Get machine state
__asm__ __volatile__ ("mov %%esp, %0":"=r"(esp));
if(eip == SWITCH_MAGIC) return; // Check if a switch happened
// Save machine state
- gCurrentThread->SavedState.ESP = esp;
- gCurrentThread->SavedState.EBP = ebp;
- gCurrentThread->SavedState.EIP = eip;
+ thread->SavedState.ESP = esp;
+ thread->SavedState.EBP = ebp;
+ thread->SavedState.EIP = eip;
// Get next thread
thread = Threads_GetNextToRun(CPU);
return;
}
+ #if DEBUG_TRACE_SWITCH
+ Log("Switching to task %i, CR3 = 0x%x, EIP = %p",
+ thread->TID,
+ thread->MemState.CR3,
+ thread->SavedState.EIP
+ );
+ #endif
+
// Set current thread
+ #if USE_MP
+ gaCPUs[CPU].Current = thread;
+ #else
gCurrentThread = thread;
+ #endif
// Update Kernel Stack pointer
- gTSSs[CPU].ESP0 = thread->KernelStack;
+ gTSSs[CPU].ESP0 = thread->KernelStack-4;
// Set address space
- __asm__ __volatile__ ("mov %0, %%cr3"::"a"(gCurrentThread->MemState.CR3));
+ #if USE_PAE
+ # error "Todo: Implement PAE Address space switching"
+ #else
+ __asm__ __volatile__ ("mov %0, %%cr3"::"a"(thread->MemState.CR3));
+ #endif
// Switch threads
__asm__ __volatile__ (
- "mov %1, %%esp\n\t"
- "mov %2, %%ebp\n\t"
- "jmp *%3" : :
- "a"(SWITCH_MAGIC), "b"(gCurrentThread->SavedState.ESP),
- "d"(gCurrentThread->SavedState.EBP), "c"(gCurrentThread->SavedState.EIP));
+ "mov %1, %%esp\n\t" // Restore ESP
+ "mov %2, %%ebp\n\t" // and EBP
+ "jmp *%3" : : // And return to where we saved state (Proc_Clone or Proc_Scheduler)
+ "a"(SWITCH_MAGIC), "b"(thread->SavedState.ESP),
+ "d"(thread->SavedState.EBP), "c"(thread->SavedState.EIP)
+ );
for(;;); // Shouldn't reach here
}
+
+// === EXPORTS ===
+EXPORT(Proc_SpawnWorker);