From: John Hodge
Date: Tue, 4 Oct 2011 03:56:36 +0000 (+0800)
Subject: Kernel/x86_64 - Bugfixing, cleaning up scheduling
X-Git-Tag: rel0.11~35
X-Git-Url: https://git.ucc.asn.au/?a=commitdiff_plain;h=fb80e752428843b891e8fde12f2263bcd84d06be;p=tpg%2Facess2.git

Kernel/x86_64 - Bugfixing, cleaning up scheduling
---
diff --git a/Kernel/arch/x86_64/desctab.asm b/Kernel/arch/x86_64/desctab.asm
index 27ba9137..2d96180d 100644
--- a/Kernel/arch/x86_64/desctab.asm
+++ b/Kernel/arch/x86_64/desctab.asm
@@ -44,7 +44,7 @@ Desctab_Init:
 	%endrep
 	
 	; Install IRQs
-	SETIDT 0xF0, SchedulerIRQ
+	SETIDT 0xF0, PIT_IRQ
 	SETIDT 0xF1, Irq1
 	SETIDT 0xF2, Irq2
 	SETIDT 0xF3, Irq3
@@ -123,7 +123,7 @@ Desctab_Init:
 	; Set IA32_STAR (Kernel/User CS)
 	mov ecx, 0xC0000081
 	rdmsr
-	mov edx, 0x8 | (0x18 << 16)	; Kernel CS (and Kernel DS/SS - 8), User CS
+	mov edx, 0x8 | (0x1B << 16)	; Kernel CS (and Kernel DS/SS - 8), User CS
 	wrmsr
 	
 	ret
@@ -290,7 +290,6 @@ IrqCommon:
 ;	call Log
 	
 	mov ebx, [rsp+(16+2)*8]	; Get interrupt number (16 GPRS + 2 SRs)
-;	xchg bx, bx	; Bochs Magic break (NOTE: will clear the high-bits of RBX)
 	shl ebx, 2	; *4
 	mov rax, gaIRQ_Handlers
 	lea rbx, [rax+rbx*8]
@@ -328,59 +327,50 @@ IrqCommon:
 	add rsp, 8*2
 	iretq
 
-[extern Proc_Scheduler]
-[global SchedulerIRQ]
-;
-; NOTE: Proc_Scheduler makes assumptions about the stack state when called
-;
-SchedulerIRQ:
-	push 0	; Error code
-	push 0	; IRQNum
+[extern Time_UpdateTimestamp]
+
+%if USE_MP
+[global APIC_Timer_IRQ]
+APIC_Timer_IRQ:
 	PUSH_GPR
 	push gs
 	push fs
+	
+	; TODO: What to do?
+	
+	mov eax, DWORD [gpMP_LocalAPIC]
+	mov DWORD [eax+0x0B0], 0
+	
+	pop fs
+	pop gs
+	POP_GPR
+	iretq
+%endif
+
+[global PIT_IRQ]
+PIT_IRQ:
+	PUSH_GPR
 	;PUSH_FPU
 	;PUSH_XMM
 	
-	; Save Thread Pointer
-	mov rax, dr0
-	push rax
-	
-	mov rdi, dr1	; Get the CPU Number
-	mov rsi, rsp	; Save stack pointer
-	mov rdx, SchedulerIRQ.restoreState
-	
-	; Call the Scheduler
-	call Proc_Scheduler
-.restoreState:
-	
-	; Restore Thread Pointer
-	pop rax
-	mov dr0, rax
-	
-	; Send EOI (To either the APIC or the PIC)
-	%if USE_MP
-	test ebx, ebx
-	jnz .sendEOI
+	call Time_UpdateTimestamp
+	
+	%if 0
+[section .rodata]
+csUserSS: db "User SS: 0x%x",0
+[section .text]
+	mov rdi, csUserSS
+	mov rsi, [rsp+0x80+0x20]
+	call Log
 	%endif
+	
+	; Send EOI
 	mov al, 0x20
 	out 0x20, al	; ACK IRQ
 	
-	%if USE_MP
-	jmp .ret
-	
-	; APIC
-.sendEOI:
-	mov eax, DWORD [gpMP_LocalAPIC]
-	mov DWORD [eax+0x0B0], 0
-	%endif
-.ret:
 	;POP_XMM
 	;POP_FPU
-	pop fs
-	pop gs
 	POP_GPR
-	add rsp, 2*8	; Dummy error code and IRQ num
-;	xchg bx, bx
 	iretq
 
 [extern ci_offsetof_tThread_KernelStack]
@@ -415,6 +405,16 @@ SyscallStub:
 	mov rdi, rsp
 	sub rsp, 8
 	call SyscallHandler
+	
+	%if 0
+[section .rodata]
+csSyscallReturn: db "Syscall Return: 0x%x",0
+[section .text]
+	mov rdi, csSyscallReturn
+	mov rsi, [rsp+0+8]
+	call Log
+	%endif
+	
 	add rsp, 8
 	mov ebx, [rsp+8]	; Get errno
 	mov rax, [rsp+0]	; Get return
@@ -425,7 +425,6 @@ SyscallStub:
 	pop rsp	; Change back to user stack
 	
 	; TODO: Determine if user is 64 or 32 bit
-;	xchg bx, bx
 	db 0x48	; REX, nasm doesn't have a sysretq opcode
 	sysret
 
diff --git a/Kernel/arch/x86_64/errors.c b/Kernel/arch/x86_64/errors.c
index 9ddc950b..c48174ef 100644
--- a/Kernel/arch/x86_64/errors.c
+++ b/Kernel/arch/x86_64/errors.c
@@ -9,7 +9,7 @@
 #define MAX_BACKTRACE 6
 
 // === IMPORTS ===
-void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
+ int MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
 void Error_Backtrace(Uint IP, Uint BP);
 
 // === PROTOTYPES ===
@@ -34,13 +34,14 @@ void Error_Handler(tRegs *Regs)
 	if( Regs->IntNum == 14 )
 	{
 		__asm__ __volatile__ ("mov %%cr2, %0":"=r"(cr));
-		MM_PageFault(cr, Regs->ErrorCode, Regs);
-		return ;
+		if( MM_PageFault(cr, Regs->ErrorCode, Regs) == 0 )
+			return ;
 	}
-	
-	Debug_KernelPanic();
+	else {
+		Debug_KernelPanic();
 	
-	Error_Backtrace(Regs->RIP, Regs->RBP);
+		Error_Backtrace(Regs->RIP, Regs->RBP);
+	}
 	
 	Log("CPU Error %x, Code: 0x%x", Regs->IntNum, Regs->ErrorCode);
 //	Log(" - %s", csaERROR_NAMES[Regs->IntNum]);
diff --git a/Kernel/arch/x86_64/mm_virt.c b/Kernel/arch/x86_64/mm_virt.c
index 2c91aed1..d9ae34cf 100644
--- a/Kernel/arch/x86_64/mm_virt.c
+++ b/Kernel/arch/x86_64/mm_virt.c
@@ -60,7 +60,7 @@ extern void Threads_SegFault(tVAddr Addr);
 // === PROTOTYPES ===
 void MM_InitVirt(void);
 //void MM_FinishVirtualInit(void);
-void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
+ int MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
 void MM_DumpTables(tVAddr Start, tVAddr End);
  int MM_GetPageEntryPtr(tVAddr Addr, BOOL bTemp, BOOL bAllocate, BOOL bLargePage, tPAddr **Pointer);
  int MM_MapEx(tVAddr VAddr, tPAddr PAddr, BOOL bTemp, BOOL bLarge);
@@ -86,7 +86,7 @@ void MM_FinishVirtualInit(void)
 /**
  * \brief Called on a page fault
  */
-void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
+int MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
 {
 	// TODO: Implement Copy-on-Write
 	#if 1
@@ -104,24 +104,23 @@ void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
 		}
 		else
 		{
+			void *tmp;
 			//Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
 			paddr = MM_AllocPhys();
 			if( !paddr ) {
 				Threads_SegFault(Addr);
-				return ;
-			}
-			{
-				void *tmp = (void*)MM_MapTemp(paddr);
-				memcpy( tmp, (void*)(Addr & ~0xFFF), 0x1000 );
-				MM_FreeTemp( (tVAddr)tmp );
+				return 0;
 			}
+			tmp = (void*)MM_MapTemp(paddr);
+			memcpy( tmp, (void*)(Addr & ~0xFFF), 0x1000 );
+			MM_FreeTemp( (tVAddr)tmp );
 			MM_DerefPhys( PAGETABLE(Addr>>12) & PADDR_MASK );
 			PAGETABLE(Addr>>12) &= PF_USER;
 			PAGETABLE(Addr>>12) |= paddr|PF_PRESENT|PF_WRITE;
 		}
 		
 		INVLPG( Addr & ~0xFFF );
-		return;
+		return 0;
 	}
 	#endif
 
@@ -136,7 +135,7 @@ void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
 			Regs->CS, Regs->RIP, Addr);
 		__asm__ __volatile__ ("sti");	// Restart IRQs
 		Threads_SegFault(Addr);
-		return ;
+		return 0;
 	}
 	
 	// Kernel #PF
@@ -158,10 +157,8 @@ void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
 	
 	Error_Backtrace(Regs->RIP, Regs->RBP);
 	MM_DumpTables(0, -1);
-	
-	__asm__ __volatile__ ("cli");
-	for( ;; )
-		HALT();
+	
+	return 1;
 }
 
 /**
@@ -862,7 +859,7 @@ tVAddr MM_NewWorkerStack(void *StackData, size_t StackSize)
 	for(ret = 0x100000; ret < (1ULL << 47); ret += KERNEL_STACK_SIZE)
 	{
 		tPAddr *ptr;
-		if( MM_GetPageEntryPtr(ret, 1, 0, 0, &ptr) == 0 ) break;
+		if( MM_GetPageEntryPtr(ret, 1, 0, 0, &ptr) <= 0 ) break;
 		if( !(*ptr & 1) ) break;
 	}
 	if( ret >= (1ULL << 47) ) {
diff --git a/Kernel/arch/x86_64/proc.c b/Kernel/arch/x86_64/proc.c
index 49713959..6ed99eea 100644
--- a/Kernel/arch/x86_64/proc.c
+++ b/Kernel/arch/x86_64/proc.c
@@ -16,7 +16,7 @@
 #include
 
 // === FLAGS ===
-#define DEBUG_TRACE_SWITCH 0
+#define DEBUG_TRACE_SWITCH 1
 #define BREAK_ON_SWITCH 0	// Break into bochs debugger on a task switch
 
 // === CONSTANTS ===
@@ -364,7 +364,10 @@ void Proc_IdleTask(void *ptr)
 	cpu->IdleThread->ThreadName = (char*)"Idle Thread";
 	Threads_SetPriority( cpu->IdleThread, -1 );	// Never called randomly
 	cpu->IdleThread->Quantum = 1;	// 1 slice quantum
-	for(;;) HALT();	// Just yeilds
+	for(;;) {
+		HALT();	// Just yeilds
+		Threads_Yield();
+	}
 }
 
 /**
@@ -643,12 +646,24 @@ void Proc_StartProcess(Uint16 SS, Uint Stack, Uint Flags, Uint16 CS, Uint IP)
 	else
 	{
 		// 64-bit return
+		#if 1
 		__asm__ __volatile__ (
 			"mov %0, %%rsp;\n\t"	// Set stack pointer
 			"mov %2, %%r11;\n\t"	// Set RFLAGS
 			"sysretq;\n\t"
 			: : "r" (Stack), "c" (IP), "r" (Flags)
 			);
+		#else
+		__asm__ __volatile__ (
+			"push $0x23;\n\t"	// SS
+			"push %0;\n\t"	// RSP
+			"push %2;\n\t"	// Flags
+			"push $0x2B;\n\t"	// CS
+			"push %1;\n\t"	// IP
+			"iretq"
+			: : "r" (Stack), "r" (IP), "r" (Flags)
+			);
+		#endif
 	}
 	for(;;);
 }
@@ -718,17 +733,19 @@ void Proc_Reschedule(void)
 		return ;
 	
 	#if DEBUG_TRACE_SWITCH
-	LogF("\nSwitching to task %i, CR3 = 0x%x, RIP = %p, RSP = %p\n",
+	LogF("\nSwitching to task %i, CR3 = 0x%x, RIP = %p, RSP = %p, KStack = %p\n",
 		nextthread->TID,
 		nextthread->MemState.CR3,
 		nextthread->SavedState.RIP,
-		nextthread->SavedState.RSP
+		nextthread->SavedState.RSP,
+		nextthread->KernelStack
 		);
 	#endif
 	
 	// Update CPU state
 	gaCPUs[cpu].Current = nextthread;
 	gTSSs[cpu].RSP0 = nextthread->KernelStack-4;
+	__asm__ __volatile__ ("mov %0, %%db0" : : "r" (nextthread));
 	SwitchTasks(
 		nextthread->SavedState.RSP,
 		&curthread->SavedState.RSP,
@@ -744,11 +761,10 @@
  */
 void Proc_Scheduler(int CPU, Uint RSP, Uint RIP)
 {
+#if 0
+	{
 	tThread *thread;
 	
-	if( CPU == 0 )
-		Time_UpdateTimestamp();
-	
 	// If the spinlock is set, let it complete
 	if(IS_LOCKED(&glThreadListLock)) return;
 	
@@ -774,6 +790,8 @@ void Proc_Scheduler(int CPU, Uint RSP, Uint RIP)
 	
 	// ACK Timer here?
 	Proc_Reschedule();
+	}
+#endif
 }
 
 // === EXPORTS ===
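
A note on the timer handlers above: the new PIT_IRQ acknowledges the tick by writing the EOI command (0x20) to the master PIC's command port at I/O address 0x20, while the MP-only APIC_Timer_IRQ acknowledges by writing 0 to the local APIC's EOI register, 0x0B0 bytes into the mapping pointed to by gpMP_LocalAPIC. Below is a minimal C sketch of those two acknowledgements only; outb() and the Uint* typedefs are assumed helpers from the Acess2 arch headers, and gpMP_LocalAPIC is treated here as a raw byte pointer purely for illustration.

	/* Sketch only: the two EOI writes performed by PIT_IRQ and APIC_Timer_IRQ.
	 * outb() and the Uint* typedefs are assumed from the arch headers;
	 * gpMP_LocalAPIC is the local APIC mapping set up by the MP code. */
	extern void outb(Uint16 Port, Uint8 Value);
	extern volatile Uint8 *gpMP_LocalAPIC;
	
	static void ACK_PIC_Timer(void)
	{
		outb(0x20, 0x20);	/* non-specific EOI to the master 8259 PIC */
	}
	
	static void ACK_LAPIC_Timer(void)
	{
		/* a write to the local APIC EOI register (offset 0x0B0) completes the IRQ */
		*(volatile Uint32*)(gpMP_LocalAPIC + 0x0B0) = 0;
	}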
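
The page-fault plumbing is the other half of the cleanup: MM_PageFault now reports whether it dealt with the fault, returning 0 after a successful copy-on-write duplication or after delivering a user segfault, and 1 after dumping the tables for an unhandled kernel fault, instead of disabling interrupts and halting the CPU itself. Error_Handler returns early in the handled case and otherwise falls through to its usual logging, with the panic and backtrace reserved for non-page-fault exceptions. Reassembled with comments, the new control flow reads roughly as the sketch below; the _Sketch suffix and the extern declarations are illustrative, with tRegs, tVAddr and Uint coming from the arch headers.

	/* Sketch of the caller-side contract: 0 = fault resolved, non-zero = fatal. */
	extern int MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
	extern void Debug_KernelPanic(void);
	extern void Error_Backtrace(Uint IP, Uint BP);
	extern void Log(const char *Fmt, ...);
	
	void Error_Handler_Sketch(tRegs *Regs)
	{
		Uint cr = 0;
		if( Regs->IntNum == 14 )	/* #PF: the faulting address is in CR2 */
		{
			__asm__ __volatile__ ("mov %%cr2, %0" : "=r" (cr));
			if( MM_PageFault(cr, Regs->ErrorCode, Regs) == 0 )
				return ;	/* handled: COW copy done or user segfault delivered */
		}
		else {
			Debug_KernelPanic();
			Error_Backtrace(Regs->RIP, Regs->RBP);
		}
		/* a non-zero return (or any non-#PF exception) ends up here */
		Log("CPU Error %x, Code: 0x%x", Regs->IntNum, Regs->ErrorCode);
	}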
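
The one-byte IA32_STAR change in desctab.asm is easy to miss: bits 63:48 of that MSR are the selector base that SYSRET uses on the way back to user mode, loading CS = base + 16 and SS = base + 8 for a 64-bit return. With the new base of 0x1B the syscall stub returns with CS = 0x2B and SS = 0x23, the same user selectors the iretq fallback in Proc_StartProcess pushes, whereas the old 0x18 base has the RPL bits clear. The arithmetic, with macro names that are illustrative only:

	/* Illustrative names; only the arithmetic is taken from the patch. */
	#define STAR_SYSRET_BASE 0x1B	/* written to IA32_STAR[63:48] in Desctab_Init */
	#define SYSRET_USER_CS (STAR_SYSRET_BASE + 16)	/* 0x2B: matches "push $0x2B" (CS) */
	#define SYSRET_USER_SS (STAR_SYSRET_BASE + 8)	/* 0x23: matches "push $0x23" (SS) */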