From: John Hodge Date: Sun, 25 Sep 2011 03:04:58 +0000 (+0800) Subject: Kernel/x86 - Fixed build and run after x86_64 and arm7 changes to API X-Git-Tag: rel0.11~74 X-Git-Url: https://git.ucc.asn.au/?a=commitdiff_plain;h=17aac974ab83a3521f2b49b8de33ae05a00fbe07;p=tpg%2Facess2.git Kernel/x86 - Fixed build and run after x86_64 and arm7 changes to API --- diff --git a/Kernel/arch/x86/include/mm_virt.h b/Kernel/arch/x86/include/mm_virt.h index 4125d71c..4a11e716 100644 --- a/Kernel/arch/x86/include/mm_virt.h +++ b/Kernel/arch/x86/include/mm_virt.h @@ -44,6 +44,6 @@ extern void MM_FinishVirtualInit(void); extern void MM_SetCR3(Uint CR3); extern tPAddr MM_Clone(void); extern tVAddr MM_NewKStack(void); -extern tVAddr MM_NewWorkerStack(void); +extern tVAddr MM_NewWorkerStack(Uint *InitialStack, size_t StackSize); #endif diff --git a/Kernel/arch/x86/lib.c b/Kernel/arch/x86/lib.c index 0d493bb1..3a70c754 100644 --- a/Kernel/arch/x86/lib.c +++ b/Kernel/arch/x86/lib.c @@ -343,6 +343,25 @@ void *memcpyd(void *Dest, const void *Src, size_t Num) return Dest; } +Uint64 DivMod64U(Uint64 Num, Uint64 Div, Uint64 *Rem) +{ + Uint64 ret; + if( Div < 0x100000000ULL && Num < 0xFFFFFFFF * Div ) { + Uint32 rem, ret_32; + __asm__ __volatile__( + "div %4" + : "=a" (ret_32), "=d" (rem) + : "a" ( (Uint32)(Num & 0xFFFFFFFF) ), "d" ((Uint32)(Num >> 32)), "r" (Div) + ); + if(Rem) *Rem = rem; + return ret_32; + } + + ret = __udivdi3(Num, Div); + if(Rem) *Rem = __umoddi3(Num, Div); + return ret; +} + /** * \fn Uint64 __udivdi3(Uint64 Num, Uint64 Den) * \brief Divide two 64-bit integers diff --git a/Kernel/arch/x86/mm_virt.c b/Kernel/arch/x86/mm_virt.c index 77ee4f6f..970e9575 100644 --- a/Kernel/arch/x86/mm_virt.c +++ b/Kernel/arch/x86/mm_virt.c @@ -680,18 +680,11 @@ tVAddr MM_NewKStack(void) * \fn tVAddr MM_NewWorkerStack() * \brief Creates a new worker stack */ -tVAddr MM_NewWorkerStack() +tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize) { - Uint esp, ebp; - Uint oldstack; Uint 
base, addr; - int i, j; - Uint *tmpPage; - tPAddr pages[WORKER_STACK_SIZE>>12]; - - // Get the old ESP and EBP - __asm__ __volatile__ ("mov %%esp, %0": "=r"(esp)); - __asm__ __volatile__ ("mov %%ebp, %0": "=r"(ebp)); + tVAddr tmpPage; + tPAddr page; // TODO: Thread safety // Find a free worker stack address @@ -741,44 +734,22 @@ tVAddr MM_NewWorkerStack() // Mapping Time! for( addr = 0; addr < WORKER_STACK_SIZE; addr += 0x1000 ) - //for( addr = WORKER_STACK_SIZE; addr; addr -= 0x1000 ) { - pages[ addr >> 12 ] = MM_AllocPhys(); - gaTmpTable[ (base + addr) >> 12 ] = pages[addr>>12] | 3; + page = MM_AllocPhys(); + gaTmpTable[ (base + addr) >> 12 ] = page | 3; } *gpTmpCR3 = 0; // Release the temp mapping lock Mutex_Release(&glTempFractal); - - // Copy the old stack - oldstack = (esp + KERNEL_STACK_SIZE-1) & ~(KERNEL_STACK_SIZE-1); - esp = oldstack - esp; // ESP as an offset in the stack - - // Make `base` be the top of the stack - base += WORKER_STACK_SIZE; - - i = (WORKER_STACK_SIZE>>12) - 1; - // Copy the contents of the old stack to the new one, altering the addresses - // `addr` is refering to bytes from the stack base (mem downwards) - for(addr = 0; addr < esp; addr += 0x1000) - { - Uint *stack = (Uint*)( oldstack-(addr+0x1000) ); - tmpPage = (void*)MM_MapTemp( pages[i] ); - // Copy old stack - for(j = 0; j < 1024; j++) - { - // Possible Stack address? 
- if(oldstack-esp < stack[j] && stack[j] < oldstack) - tmpPage[j] = base - (oldstack - stack[j]); - else // Seems not, best leave it alone - tmpPage[j] = stack[j]; - } - MM_FreeTemp((tVAddr)tmpPage); - i --; - } + + // NOTE: Max of 1 page + // `page` is the last allocated page from the previous for loop + tmpPage = MM_MapTemp( page ); + memcpy( (void*)( tmpPage + (0x1000 - ContentsSize) ), StackContents, ContentsSize); + MM_FreeTemp(tmpPage); //Log("MM_NewWorkerStack: RETURN 0x%x", base); - return base; + return base + WORKER_STACK_SIZE; } /** diff --git a/Kernel/arch/x86/proc.asm b/Kernel/arch/x86/proc.asm index 1011b53c..5a9ad0aa 100644 --- a/Kernel/arch/x86/proc.asm +++ b/Kernel/arch/x86/proc.asm @@ -8,6 +8,26 @@ KERNEL_BASE equ 0xC0000000 KSTACK_USERSTATE_SIZE equ (4+8+1+5)*4 ; SRegs, GPRegs, CPU, IRET [section .text] + +[global NewTaskHeader] +NewTaskHeader: + mov eax, [esp] + mov dr0, eax + xchg bx, bx + + sti + ; TODO: SMP + mov al, 0x20 + out 0x20, al + + mov eax, [esp+4] + add esp, 12 ; Thread, Function, Args + call eax + + push eax ; Ret val + push 0 ; 0 = This Thread + call Threads_Exit + %if USE_MP [extern giMP_TimerCount] [extern gpMP_LocalAPIC] diff --git a/Kernel/arch/x86/proc.c b/Kernel/arch/x86/proc.c index b1b18a6a..badeaec9 100644 --- a/Kernel/arch/x86/proc.c +++ b/Kernel/arch/x86/proc.c @@ -19,7 +19,6 @@ #define DEBUG_VERY_SLOW_SWITCH 0 // === CONSTANTS === -#define SWITCH_MAGIC 0xFF5317C8 // FF SWITCH - There is no code in this area // Base is 1193182 #define TIMER_BASE 1193182 #if DEBUG_VERY_SLOW_PERIOD @@ -47,6 +46,7 @@ extern void APWait(void); // 16-bit AP pause code extern void APStartup(void); // 16-bit AP startup code extern Uint GetEIP(void); // start.asm extern Uint GetEIP_Sched(void); // proc.asm +extern void NewTaskHeader(tThread *Thread, void *Fcn, int nArgs, ...); // Actually takes cdecl args extern Uint32 gaInitPageDir[1024]; // start.asm extern char Kernel_Stack_Top[]; extern tShortSpinlock glThreadListLock; @@ -614,7 +614,7 @@ 
int Proc_Clone(Uint Flags) newThread->SavedState.ESP = esp; newThread->SavedState.EBP = ebp; eip = GetEIP(); - if(eip == SWITCH_MAGIC) { + if(eip == 0) { //__asm__ __volatile__ ("mov %0, %%db0" : : "r" (newThread) ); #if USE_MP // ACK the interrupt @@ -640,12 +640,10 @@ int Proc_Clone(Uint Flags) * \fn int Proc_SpawnWorker(void) * \brief Spawns a new worker thread */ -int Proc_SpawnWorker(void (*Fcn)(void*), void *Data); +int Proc_SpawnWorker(void (*Fcn)(void*), void *Data) { - tThread *new, *cur; - Uint eip, esp, ebp; - - cur = Proc_GetCurThread(); + tThread *new; + Uint stack_contents[4]; // Create new thread new = Threads_CloneThreadZero(); @@ -653,36 +651,23 @@ int Proc_SpawnWorker(void (*Fcn)(void*), void *Data); Warning("Proc_SpawnWorker - Out of heap space!\n"); return -1; } - // Create a new worker stack (in PID0's address space) - // - The stack is relocated by this function - new->KernelStack = MM_NewWorkerStack(); - // Get ESP and EBP based in the new stack - __asm__ __volatile__ ("mov %%esp, %0": "=r"(esp)); - __asm__ __volatile__ ("mov %%ebp, %0": "=r"(ebp)); - esp = new->KernelStack - (cur->KernelStack - esp); - ebp = new->KernelStack - (cur->KernelStack - ebp); + // Create the stack contents + stack_contents[3] = (Uint)Data; + stack_contents[2] = 1; + stack_contents[1] = (Uint)Fcn; + stack_contents[0] = (Uint)new; + // Create a new worker stack (in PID0's address space) + new->KernelStack = MM_NewWorkerStack(stack_contents, sizeof(stack_contents)); + // Save core machine state - new->SavedState.ESP = esp; - new->SavedState.EBP = ebp; - eip = GetEIP(); - if(eip == SWITCH_MAGIC) { - //__asm__ __volatile__ ("mov %0, %%db0" : : "r"(new)); - #if USE_MP - // ACK the interrupt - if(GetCPUNum()) - gpMP_LocalAPIC->EOI.Val = 0; - else - #endif - outb(0x20, 0x20); // ACK Timer and return as child - __asm__ __volatile__ ("sti"); // Restart interrupts - return 0; - } + new->SavedState.ESP = new->KernelStack - sizeof(stack_contents); + new->SavedState.EBP = 0; + 
new->SavedState.EIP = (Uint)NewTaskHeader; - // Set EIP as parent - new->SavedState.EIP = eip; // Mark as active + new->Status = THREAD_STAT_PREINIT; Threads_AddActive( new ); return new->TID; @@ -963,7 +948,7 @@ void Proc_Scheduler(int CPU) __asm__ __volatile__ ( "mov %%esp, %0" : "=r" (esp) ); __asm__ __volatile__ ( "mov %%ebp, %0" : "=r" (ebp) ); eip = GetEIP(); - if(eip == SWITCH_MAGIC) { + if(eip == 0) { __asm__ __volatile__ ( "nop" : : : "eax","ebx","ecx","edx","edi","esi"); regs = (tRegs*)(ebp+(2+2)*4); // EBP,Ret + CPU,CurThread #if USE_MP @@ -1054,13 +1039,18 @@ void Proc_Scheduler(int CPU) __asm__ __volatile__("mov %0, %%db0\n\t" : : "r"(thread) ); // Switch threads __asm__ __volatile__ ( - "mov %4, %%cr3\n\t" // Set address space - "mov %1, %%esp\n\t" // Restore ESP - "mov %2, %%ebp\n\t" // and EBP - "or %5, 72(%%ebp)\n\t" // or trace flag to eflags (2+2+4+8+2)*4 - "jmp *%3" : : // And return to where we saved state (Proc_Clone or Proc_Scheduler) - "a"(SWITCH_MAGIC), "b"(thread->SavedState.ESP), - "d"(thread->SavedState.EBP), "c"(thread->SavedState.EIP), + "mov %3, %%cr3\n\t" // Set address space + "mov %0, %%esp\n\t" // Restore ESP + "mov %1, %%ebp\n\t" // and EBP + "test %4, %4\n\t" + "jz 1f\n\t" + "or %4, 72(%%ebp)\n\t" // or trace flag to eflags (2+2+4+8+2)*4 + "1:" + "xor %%eax, %%eax\n\t" + "jmp *%2" : : // And return to where we saved state (Proc_Clone or Proc_Scheduler) + "r"(thread->SavedState.ESP), + "r"(thread->SavedState.EBP), + "r"(thread->SavedState.EIP), "r"(thread->MemState.CR3), "r"(thread->bInstrTrace&&thread->SavedState.EIP==(Uint)&GetEIP_Sched_ret?0x100:0) ); diff --git a/Kernel/drv/vterm.c b/Kernel/drv/vterm.c index 26ab81b0..277d6c52 100644 --- a/Kernel/drv/vterm.c +++ b/Kernel/drv/vterm.c @@ -109,7 +109,7 @@ const Uint16 caVT100Colours[] = { }; // === GLOBALS === -MODULE_DEFINE(0, VERSION, VTerm, VT_Install, NULL, FALLBACK_OUTPUT, DEFAULT_INPUT, NULL); +MODULE_DEFINE(0, VERSION, VTerm, VT_Install, NULL, DEFAULT_INPUT, 
FALLBACK_OUTPUT, NULL); tDevFS_Driver gVT_DrvInfo = { NULL, "VTerm", { diff --git a/Kernel/threads.c b/Kernel/threads.c index d5e1b4c3..c0c61329 100644 --- a/Kernel/threads.c +++ b/Kernel/threads.c @@ -849,7 +849,8 @@ void Threads_AddActive(tThread *Thread) if( Thread->Status == THREAD_STAT_ACTIVE ) { tThread *cur = Proc_GetCurThread(); - Log_Warning("Threads", "WTF, CPU%i %p (%i %s) is adding %p (%i %s) when it is active", + Log_Warning("Threads", "WTF, %p CPU%i %p (%i %s) is adding %p (%i %s) when it is active", + __builtin_return_address(0), GetCPUNum(), cur, cur->TID, cur->ThreadName, Thread, Thread->TID, Thread->ThreadName); SHORTREL( &glThreadListLock ); return ;