From 7f4eb778a1b3d3c991a87a495d586974061b172e Mon Sep 17 00:00:00 2001 From: John Hodge Date: Fri, 3 Aug 2012 13:40:10 +0800 Subject: [PATCH] Kernel - Added untested armv6 tree (mostly copypasta of armv7) --- BuildConf/armv6/Makefile.cfg | 18 + BuildConf/armv6/default.mk | 7 + BuildConf/armv6/raspberrypi.mk | 3 + KernelLand/Kernel/arch/armv6/Makefile | 21 + KernelLand/Kernel/arch/armv6/debug.c | 57 + KernelLand/Kernel/arch/armv6/include/arch.h | 44 + .../Kernel/arch/armv6/include/assembly.h | 46 + KernelLand/Kernel/arch/armv6/include/lock.h | 63 + .../Kernel/arch/armv6/include/mm_virt.h | 57 + .../Kernel/arch/armv6/include/options.h | 22 + KernelLand/Kernel/arch/armv6/include/proc.h | 48 + KernelLand/Kernel/arch/armv6/lib.S | 84 ++ KernelLand/Kernel/arch/armv6/lib.c | 224 ++++ KernelLand/Kernel/arch/armv6/link.ld | 59 + KernelLand/Kernel/arch/armv6/main.c | 96 ++ KernelLand/Kernel/arch/armv6/mm_phys.c | 60 + KernelLand/Kernel/arch/armv6/mm_virt.c | 1080 +++++++++++++++++ KernelLand/Kernel/arch/armv6/pci.c | 32 + KernelLand/Kernel/arch/armv6/proc.S | 105 ++ KernelLand/Kernel/arch/armv6/proc.c | 235 ++++ KernelLand/Kernel/arch/armv6/start.S | 370 ++++++ KernelLand/Kernel/arch/armv6/time.c | 16 + 22 files changed, 2747 insertions(+) create mode 100644 BuildConf/armv6/Makefile.cfg create mode 100644 BuildConf/armv6/default.mk create mode 100644 BuildConf/armv6/raspberrypi.mk create mode 100644 KernelLand/Kernel/arch/armv6/Makefile create mode 100644 KernelLand/Kernel/arch/armv6/debug.c create mode 100644 KernelLand/Kernel/arch/armv6/include/arch.h create mode 100644 KernelLand/Kernel/arch/armv6/include/assembly.h create mode 100644 KernelLand/Kernel/arch/armv6/include/lock.h create mode 100644 KernelLand/Kernel/arch/armv6/include/mm_virt.h create mode 100644 KernelLand/Kernel/arch/armv6/include/options.h create mode 100644 KernelLand/Kernel/arch/armv6/include/proc.h create mode 100644 KernelLand/Kernel/arch/armv6/lib.S create mode 100644 KernelLand/Kernel/arch/armv6/lib.c create mode 100644 KernelLand/Kernel/arch/armv6/link.ld create mode 100644 KernelLand/Kernel/arch/armv6/main.c create mode 100644 KernelLand/Kernel/arch/armv6/mm_phys.c create mode 100644 KernelLand/Kernel/arch/armv6/mm_virt.c create mode 100644 KernelLand/Kernel/arch/armv6/pci.c create mode 100644 KernelLand/Kernel/arch/armv6/proc.S create mode 100644 KernelLand/Kernel/arch/armv6/proc.c create mode 100644 KernelLand/Kernel/arch/armv6/start.S create mode 100644 KernelLand/Kernel/arch/armv6/time.c diff --git a/BuildConf/armv6/Makefile.cfg b/BuildConf/armv6/Makefile.cfg new file mode 100644 index 00000000..00ed8895 --- /dev/null +++ b/BuildConf/armv6/Makefile.cfg @@ -0,0 +1,18 @@ + +ARM_CPUNAME = gerneric-armv6 +CC = arm-armv6-eabi-gcc -mcpu=$(ARM_CPUNAME) +AS = arm-armv6-eabi-gcc -mcpu=$(ARM_CPUNAME) -c +LD = arm-armv6-eabi-ld +OBJDUMP = arm-armv6-eabi-objdump +DISASM = $(OBJDUMP) -d -S +ARCHDIR = armv6 +STRIP = arm-elf-strip + +ASSUFFIX = S + +# Default Configuration +ifeq ($(PLATFORM),) + PLATFORM=raspberrypi +$(warning Defaulting to "PLATFORM=$(PLATFORM)") +endif + diff --git a/BuildConf/armv6/default.mk b/BuildConf/armv6/default.mk new file mode 100644 index 00000000..3b3cc8ba --- /dev/null +++ b/BuildConf/armv6/default.mk @@ -0,0 +1,7 @@ + +ifeq ($(PLATFORM),default) + $(error Please select a platform) +endif + +#MODULES += armv7/GIC +MODULES += Filesystems/InitRD diff --git a/BuildConf/armv6/raspberrypi.mk b/BuildConf/armv6/raspberrypi.mk new file mode 100644 index 00000000..01f08df2 --- /dev/null +++ 
b/BuildConf/armv6/raspberrypi.mk @@ -0,0 +1,3 @@ + +include $(ACESSDIR)/BuildConf/armv6/default.mk +ARM_CPUNAME = arm1176jzf-s diff --git a/KernelLand/Kernel/arch/armv6/Makefile b/KernelLand/Kernel/arch/armv6/Makefile new file mode 100644 index 00000000..b6768e30 --- /dev/null +++ b/KernelLand/Kernel/arch/armv6/Makefile @@ -0,0 +1,21 @@ +# +# Acess2 Kernel +# arm7 Architecture Makefile +# arch/arm7/Makefile + +CPPFLAGS = +CFLAGS = +ASFLAGS = + +CPPFLAGS += -DMMU_PRESENT=1 +LDFLAGS += +LIBGCC_PATH = $(shell $(CC) --print-libgcc-file-name) + +A_OBJ = start.ao main.o lib.o lib.ao time.o pci.o debug.o +A_OBJ += mm_phys.o mm_virt.o proc.o proc.ao + +#main.c: Makefile.BuildNum.$(ARCH) + +ifeq ($(PLATFORM),tegra2) + POSTBUILD = arm-elf-objcopy $(BIN) -O binary $(BIN) +endif diff --git a/KernelLand/Kernel/arch/armv6/debug.c b/KernelLand/Kernel/arch/armv6/debug.c new file mode 100644 index 00000000..7b9e55dd --- /dev/null +++ b/KernelLand/Kernel/arch/armv6/debug.c @@ -0,0 +1,57 @@ +/** + * Acess2 + * - By John Hodge (thePowersGang) + * + * arch/arm7/debug.c + * - ARM7 Debug output + * NOTE: Currently designed for the realview-pb-a8 emulated by Qemu + */ +#include + +// === CONSTANTS === +//#define UART0_BASE 0x10009000 +#define UART0_BASE 0xF1000000 // Boot time mapped + +// === PROTOTYPES === +void KernelPanic_SetMode(void); +void KernelPanic_PutChar(char Ch); +void StartupPrint(const char *str); + +// === GLOBALS === + int giDebug_SerialInitialised = 0; + +// === CODE === +void Debug_PutCharDebug(char ch) +{ + if(ch == '\n') + Debug_PutCharDebug('\r'); + + #if PLATFORM_is_tegra2 + // Tegra2 + while( !(*(volatile Uint32*)(UART0_BASE + 0x14) & (1 << 5)) ) + ; + #endif + +// *(volatile Uint32*)(SERIAL_BASE + SERIAL_REG_DATA) = ch; + *(volatile Uint32*)(UART0_BASE) = ch; +} + +void Debug_PutStringDebug(const char *str) +{ + for( ; *str; str++ ) + Debug_PutCharDebug( *str ); +} + +void KernelPanic_SetMode(void) +{ +} + +void KernelPanic_PutChar(char ch) +{ +// Debug_PutCharDebug(ch); +} + +void StartupPrint(const char *str) +{ +} + diff --git a/KernelLand/Kernel/arch/armv6/include/arch.h b/KernelLand/Kernel/arch/armv6/include/arch.h new file mode 100644 index 00000000..837a5e10 --- /dev/null +++ b/KernelLand/Kernel/arch/armv6/include/arch.h @@ -0,0 +1,44 @@ +/* + * Acess2 + * ARM7 Architecture Header + */ +#ifndef _ARCH_H_ +#define _ARCH_H_ + +// === CONSTANTS === +#define INVLPTR ((void*)-1) +#define BITS 32 +#define PAGE_SIZE 0x1000 +#define KERNEL_BASE 0x80000000 // 2GiB + +// === TYPES === +typedef unsigned int Uint; +typedef unsigned char Uint8; +typedef unsigned short Uint16; +typedef unsigned long Uint32; +typedef unsigned long long Uint64; +typedef signed int Sint; +typedef signed char Sint8; +typedef signed short Sint16; +typedef signed long Sint32; +typedef signed long long Sint64; + +typedef int size_t; +typedef char BOOL; + +typedef Uint32 tVAddr; +typedef Uint32 tPAddr; + +#include "lock.h" + +// --- Debug +extern void Debug_PutCharDebug(char Ch); +extern void Debug_PutStringDebug(const char *String); + +// This should be elsewhere, but CBF +extern void MM_SetupPhys(void); +extern int MM_InitialiseVirtual(void); + +#define NO_IO_BUS 1 + +#endif diff --git a/KernelLand/Kernel/arch/armv6/include/assembly.h b/KernelLand/Kernel/arch/armv6/include/assembly.h new file mode 100644 index 00000000..0c5c57fb --- /dev/null +++ b/KernelLand/Kernel/arch/armv6/include/assembly.h @@ -0,0 +1,46 @@ +/* + * Acess2 ARMv7 + * - By John Hodge (thePowersGang) + * + * arch/arm7/include/assembly.h + * - Assembly 
specific macros + */ +#ifndef _ASSEMBLY_H_ +#define _ASSEMBLY_H_ + +#define PUSH_GPRS \ + str r0, [sp,#-1*4];\ + str r1, [sp,#-2*4];\ + str r2, [sp,#-3*4];\ + str r3, [sp,#-4*4];\ + str r4, [sp,#-5*4];\ + str r5, [sp,#-6*4];\ + str r6, [sp,#-7*4];\ + str r7, [sp,#-8*4];\ + str r8, [sp,#-9*4];\ + str r9, [sp,#-10*4];\ + str r10, [sp,#-11*4];\ + str r11, [sp,#-12*4];\ + str r12, [sp,#-13*4];\ + str sp, [sp,#-14*4];\ + str lr, [sp,#-15*4];\ + sub sp, #16*4 + +#define POP_GPRS add sp, #16*4; \ + ldr r0, [sp,#-1*4]; \ + ldr r1, [sp,#-2*4]; \ + ldr r2, [sp,#-3*4]; \ + ldr r3, [sp,#-4*4]; \ + ldr r4, [sp,#-5*4]; \ + ldr r5, [sp,#-6*4]; \ + ldr r6, [sp,#-7*4]; \ + ldr r7, [sp,#-8*4]; \ + ldr r8, [sp,#-9*4]; \ + ldr r9, [sp,#-10*4]; \ + ldr r10, [sp,#-11*4]; \ + ldr r11, [sp,#-12*4]; \ + ldr r12, [sp,#-13*4]; \ + ldr lr, [sp,#-15*4]; + +#endif + diff --git a/KernelLand/Kernel/arch/armv6/include/lock.h b/KernelLand/Kernel/arch/armv6/include/lock.h new file mode 100644 index 00000000..6688af48 --- /dev/null +++ b/KernelLand/Kernel/arch/armv6/include/lock.h @@ -0,0 +1,63 @@ +/* + * Acess2 + * ARM7 Architecture + * + * lock.h - Hardware level spinlocks + */ +#ifndef _LOCK_H_ +#define _LOCK_H_ + +// === CODE === +struct sShortSpinlock { + int Lock; +}; + +// --- Spinlocks --- +static inline int IS_LOCKED(struct sShortSpinlock *Lock) +{ + return !!Lock->Lock; +} + +static inline int CPU_HAS_LOCK(struct sShortSpinlock *Lock) +{ + // TODO: Handle multiple CPUs + return !!Lock->Lock; +} + +static inline int SHORTLOCK(struct sShortSpinlock *Lock) +{ + #if 0 + // Coped from linux, yes, but I know what it does now :) + Uint tmp; + __asm__ __volatile__ ( + "1: ldrex %0, [%1]\n" // Exclusive LOAD + " teq %0, #0\n" // Check if zero + " strexeq %0, %2, [%1]\n" // Set to one if it is zero (releasing lock on the memory) + " teqeq %0, #0\n" // If the lock was avaliable, check if the write succeeded + " bne 1b" // If the lock was unavaliable, or the write failed, loop + : "=&r" (tmp) // Temp + : "r" (&Lock->Lock), "r" (1) + : "cc" // Condition codes clobbered + ); + #elif 1 + while( *(volatile int*)&Lock->Lock ) ; + Lock->Lock = 1; + #else + int v = 1; + while( v ) + __asm__ __volatile__ ( + "swp %0, %0, [%1]" + : "=r" (v) : "r" (&Lock->Lock) + : "cc" + ); + #endif + return 1; +} + +static inline void SHORTREL(struct sShortSpinlock *Lock) +{ + Lock->Lock = 0; +} + +#endif + diff --git a/KernelLand/Kernel/arch/armv6/include/mm_virt.h b/KernelLand/Kernel/arch/armv6/include/mm_virt.h new file mode 100644 index 00000000..c1f10deb --- /dev/null +++ b/KernelLand/Kernel/arch/armv6/include/mm_virt.h @@ -0,0 +1,57 @@ +/* + * Acess2 + * ARM7 Virtual Memory Manager Header + */ +#ifndef _MM_VIRT_H_ +#define _MM_VIRT_H_ + +#include "options.h" + +#define USER_STACK_COMM 0x04000 // Pages to allocate up front +#define USER_STACK_SIZE 0x10000 // Stack space +#define USER_STACK_TOP 0x78000000 + +#define MM_USER_MIN 0x00001000 +#define USER_LIB_MAX 0x70000000 +#define MM_PPD_HANDLES 0x7F800000 +#define MM_TABLE1USER 0x7FC00000 // 2 GiB - 4 MiB +#define MM_TABLE0USER 0x7FE00000 // 2 GiB - 2 MiB +#define MM_KSTACK_BASE 0x7FE00000 +#define MM_KSTACK_END 0x80000000 + +// Page Blocks are 12-bits wide (12 address bits used) +// Hence, the table is 16KiB large (and must be so aligned) +// and each block addresses 1MiB of data + +// First level table is aligned to 16KiB (restriction of TTBR reg) +// - VMSAv6 uses two TTBR regs, determined by bit 31 + +//#define KERNEL_BASE 0x80000000 // 2GiB + +#define MM_KHEAP_BASE 0x80800000 // 8MiB of 
kernel code +#define MM_KHEAP_MAX 0xC0000000 // ~1GiB of kernel heap + +#define MM_MODULE_MIN 0xC0000000 // - 0xD0000000 +#define MM_MODULE_MAX 0xCF000000 + +#define MM_GLOBALSTACKS 0xCF000000 // Global stacks +#define MM_GLOBALSTACKS_END 0xD0000000 + +// PMM Data, giving it 256MiB is overkill, but it's unused atm +#define MM_MAXPHYSPAGE (1024*1024) +// 2^(32-12) max pages +// 8.125 bytes per page (for bitmap allocation) +// = 8.125 MiB +#define MM_PMM_BASE 0xE0000000 +#define MM_PMM_END 0xF0000000 + +#define MM_HWMAP_BASE 0xF0000000 // Ent 0xF00 +#define MM_HWMAP_END 0xFE000000 +#define MM_TMPMAP_BASE 0xFE000000 +#define MM_TMPMAP_END 0xFF000000 + +#define MM_KERNEL_VFS 0xFF000000 // +#define MM_TABLE1KERN 0xFF800000 // - 0x???????? 4MiB +//#define MM_TABLE0KERN 0xFFC00000 // - 0xFFE04000 16KiB + +#endif diff --git a/KernelLand/Kernel/arch/armv6/include/options.h b/KernelLand/Kernel/arch/armv6/include/options.h new file mode 100644 index 00000000..4947158d --- /dev/null +++ b/KernelLand/Kernel/arch/armv6/include/options.h @@ -0,0 +1,22 @@ +/* + * Acess2 ARMv6 Port + * - By John Hodge (thePowersGang) + * + * options.h + * - C/ASM Shared constants + */ +#ifndef _ARMV7_OPTIONS_H_ +#define _ARMV7_OPTIONS_H_ + +#define KERNEL_BASE 0x80000000 + +#if PLATFORM_is_raspberrypi +# define UART0_PADDR 0x7E215040 // Realview +#else +# error Unknown platform +#endif + +#define MM_KSTACK_SIZE 0x2000 // 2 Pages + +#endif + diff --git a/KernelLand/Kernel/arch/armv6/include/proc.h b/KernelLand/Kernel/arch/armv6/include/proc.h new file mode 100644 index 00000000..d6ef3d55 --- /dev/null +++ b/KernelLand/Kernel/arch/armv6/include/proc.h @@ -0,0 +1,48 @@ +/* + * Acess2 + * ARM7 Architecture + * + * proc.h - Arch-Dependent Process Management + */ +#ifndef _PROC_H_ +#define _PROC_H_ + +#define MAX_CPUS 4 +#define USER_MAX 0x80000000 + +// === STRUCTURES === +typedef struct { + Uint32 IP, SP; + Uint32 UserIP, UserSP; +} tTaskState; + +typedef struct { + Uint32 Base; +} tMemoryState; + +typedef struct { + union { + Uint32 Num; + Uint32 Error; + }; + union { + Uint32 Arg1; + Uint32 Return; + }; + union { + Uint32 Arg2; + Uint32 RetHi; + }; + Uint32 Arg3; + Uint32 Arg4; + Uint32 Arg5; + Uint32 Arg6; // R6 +} tSyscallRegs; + +// === MACROS === +#define HALT() do{}while(0) + +// === PROTOTYPES === + +#endif + diff --git a/KernelLand/Kernel/arch/armv6/lib.S b/KernelLand/Kernel/arch/armv6/lib.S new file mode 100644 index 00000000..e2f06130 --- /dev/null +++ b/KernelLand/Kernel/arch/armv6/lib.S @@ -0,0 +1,84 @@ +/* + * Acess2 ARM + * - By John Hodge (thePowersGang) + * + * arch/arm7/lib.S + * - Assembly editions of library functions + */ +#include "include/assembly.h" + +.globl __memcpy_byte +__memcpy_byte: +1: + tst r2, r2 @ Check counter + moveq pc, lr @ Return if zero + ldrb r3, [r1],#1 @ Read + strb r3, [r0],#1 @ Write + sub r2, #1 + b 1b + +@ +@ Pre-aligned memcpy (32-bit blocks) +@ +.globl __memcpy_align4 +__memcpy_align4: + push {r4} + mvn r3, #3 @ Mask for checking length + + @ 4 byte chunk copies +1: tst r2, r3 + ldrne r4, [r1],#4 + strne r4, [r0],#4 + subne r2, #4 + bne 1b + + @ single byte copies to finish off +2: tst r2, #3 + beq 3f + ldrb r4, [r1],#1 + strb r4, [r0],#1 + sub r2, #1 + b 2b + +3: pop {r4} + mov pc, lr + +@ +@ Division +@ +.globl __divmod32_asm +__divmod32_asm: + push {r4} + mov r4, #0 @ Return value + mov r3, #1 @ add value + + @ Scan up for first larger multiple of 2 +1: cmp r0, r1 @ N < D + bmi 2f @ ^^ + lsl r1, r1, #1 @ D <<= 1 + lsls r3, r3, #1 @ add <<= 1 + beq .err @ result is zero + b 1b 
+ + @ Go back down +2: lsrs r3, r3, #1 @ add >>= 1 + beq 3f @ Done (value is zero) + lsr r1, r1, #1 @ D >>= 1 + cmp r0, r1 @ N < D + bmi 2b + sub r0, r1 @ N -= D + add r4, r3 @ ret += add + b 2b +3: + tst r2, r2 @ Remainder (if wanted) + strne r0,[r2] + mov r0, r4 @ Return value + pop {r4} + mov pc, lr +.err: + mov r0, #0 + tst r2, r2 + strne r0, [r2] + pop {r4} + mov pc, lr + diff --git a/KernelLand/Kernel/arch/armv6/lib.c b/KernelLand/Kernel/arch/armv6/lib.c new file mode 100644 index 00000000..7894e3ad --- /dev/null +++ b/KernelLand/Kernel/arch/armv6/lib.c @@ -0,0 +1,224 @@ +/* + * Acess2 ARM7 Port + * + * lib.c - Library Functions + */ +#include +#include "../helpers.h" + +// === IMPORTS === +extern void __memcpy_align4(void *_dest, const void *_src, size_t _length); +extern void __memcpy_byte(void *_dest, const void *_src, size_t _length); +extern Uint32 __divmod32_asm(Uint32 Num, Uint32 Den, Uint32 *Rem); + +// === PROTOTYPES === +Uint64 __divmod64(Uint64 Num, Uint64 Den, Uint64 *Rem); +Uint32 __divmod32(Uint32 Num, Uint32 Den, Uint32 *Rem); +#if 0 +Uint64 __udivdi3(Uint64 Num, Uint64 Den); +Uint64 __umoddi3(Uint64 Num, Uint64 Den); +Uint32 __udivsi3(Uint32 Num, Uint32 Den); +Uint32 __umodsi3(Uint32 Num, Uint32 Den); +Sint32 __divsi3(Sint32 Num, Sint32 Den); +Sint32 __modsi3(Sint32 Num, Sint32 Den); +#endif + +// === CODE === +void *memcpy(void *_dest, const void *_src, size_t _length) +{ + Uint8 *dst8 = _dest; + const Uint8 *src8 = _src; + + if( ((tVAddr)_dest & 3) == 0 && ((tVAddr)_src & 3) == 0 ) + { + __memcpy_align4(_dest, _src, _length); + return _dest; + } + + // Handle small copies / Non-aligned + if( _length < 4 || ((tVAddr)_dest & 3) != ((tVAddr)_src & 3) ) + { + __memcpy_byte(_dest, _src, _length); + return _dest; + } + + // Force alignment + while( (tVAddr)dst8 & 3 ) *dst8 ++ = *src8++, _length --; + + __memcpy_align4(dst8, src8, _length); + + return _dest; +} + +int memcmp(const void *_m1, const void *_m2, size_t _length) +{ + const Uint32 *m1, *m2; + const Uint8 *m1_8 = _m1, *m2_8 = _m2; + + // Handle small copies / Non-aligned + if( _length < 4 || ((tVAddr)_m1 & 3) != ((tVAddr)_m1 & 3) ) + { + for( ; _length--; m1_8++,m2_8++ ) { + if(*m1_8 != *m2_8) return *m1_8 - *m2_8; + } + return 0; + } + + // Force alignment + for( ; (tVAddr)m1_8 & 3; m1_8 ++, m2_8 ++) { + if(*m1_8 != *m2_8) return *m1_8 - *m2_8; + } + m1 = (void *)m1_8; m2 = (void *)m2_8; + + // DWORD copies + for( ; _length > 3; _length -= 4, m1++, m2++) + if(*m1 != *m2) return *m1 - *m2; + + // Trailing bytes + m1_8 = (void*)m1; m2_8 = (void*)m2; + for( ; _length; _length --, m1_8++, m2_8++ ) + if(*m1_8 != *m2_8) return *m1_8 - *m2_8; + + return 0; +} + +void *memset(void *_dest, int _value, size_t _length) +{ + Uint32 *dst, val32; + Uint8 *dst8 = _dest; + + _value = (Uint8)_value; + + // Handle small copies + if( _length < 4 ) + { + for( ; _length--; dst8++ ) + *dst8 = _value; + return _dest; + } + + val32 = _value; + val32 |= val32 << 8; + val32 |= val32 << 16; + + // Force alignment + while( (tVAddr)dst8 & 3 ) *dst8 ++ = _value; + dst = (void *)dst8; + + // DWORD copies + for( ; _length > 3; _length -= 4) + *dst++ = val32; + + // Trailing bytes + dst8 = (void*)dst; + for( ; _length; _length -- ) + *dst8 ++ = _value; + + return _dest; +} + +DEF_DIVMOD(64) +DEF_DIVMOD(32) + +Uint64 DivMod64U(Uint64 Num, Uint64 Den, Uint64 *Rem) +{ + Uint64 ret; + if(Den == 0) return 0; // TODO: #div0 + if(Num < Den) { + if(Rem) *Rem = Num; + return 0; + } + if(Num == 0) { + if(Rem) *Rem = 0; + return 0; + } + if(Den == 1) { + 
if(Rem) *Rem = 0; + return Num; + } + if(Den == 2) { + if(Rem) *Rem = Num & 1; + return Num >> 1; + } + if(Den == 16) { + if(Rem) *Rem = Num & 0xF; + return Num >> 4; + } + if(Den == 32) { + if(Rem) *Rem = Num & 0x1F; + return Num >> 5; + } + if(Den == 0x1000) { + if(Rem) *Rem = Num & 0xFFF; + return Num >> 12; + } + + if( !(Den >> 32) && !(Num >> 32) ) { + if(Rem) *Rem = 0; // Clear high bits + return __divmod32_asm(Num, Den, (Uint32*)Rem); + } + + ret = __divmod64(Num, Den, Rem); + return ret; +} + +#if 0 +// Unsigned Divide 64-bit Integer +Uint64 __udivdi3(Uint64 Num, Uint64 Den) +{ + return DivMod64U(Num, Den, NULL); +} + +// Unsigned Modulus 64-bit Integer +Uint64 __umoddi3(Uint64 Num, Uint64 Den) +{ + Uint64 ret = 0; + DivMod64U(Num, Den, &ret); + return ret; +} + +Uint32 __udivsi3(Uint32 Num, Uint32 Den) +{ + return __divmod32_asm(Num, Den, NULL); +} + +Uint32 __umodsi3(Uint32 Num, Uint32 Den) +{ + Uint32 rem; + __divmod32_asm(Num, Den, &rem); + return rem; +} +#endif + +static inline Sint32 DivMod32S(Sint32 Num, Sint32 Den, Sint32 *Rem) +{ + Sint32 ret = 1; + if( Num < 0 ) { + ret = -ret; + Num = -Num; + } + if( Den < 0 ) { + ret = -ret; + Den = -Den; + } + if(ret < 0) + ret = -__divmod32(Num, Den, (Uint32*)Rem); + else + ret = __divmod32(Num, Den, (Uint32*)Rem); + return ret; +} + +#if 0 +Sint32 __divsi3(Sint32 Num, Sint32 Den) +{ + return DivMod32S(Num, Den, NULL); +} + +Sint32 __modsi3(Sint32 Num, Sint32 Den) +{ + Sint32 rem; + DivMod32S(Num, Den, &rem); + return rem; +} +#endif + diff --git a/KernelLand/Kernel/arch/armv6/link.ld b/KernelLand/Kernel/arch/armv6/link.ld new file mode 100644 index 00000000..2ad5afed --- /dev/null +++ b/KernelLand/Kernel/arch/armv6/link.ld @@ -0,0 +1,59 @@ +ENTRY (_start) + +_kernel_base = 0x80000000; +_usertext_vbase = 0xFFFFE000; + +SECTIONS +{ + . = 0; + .init : + { + *(.init) + } + . += _kernel_base; + .text : AT( ADDR(.text) - _kernel_base ) + { + *(.text*) + *(.rodata*) + } + __exidx_start = .; + .ARM.exidx : { *(.ARM.exidx*) } + __exidx_end = .; + .ARM.extab : { *(.ARM.extab*) } + + + /* HACKS: User accesible .text section */ + . = ALIGN(0x1000); + gUsertextPhysStart = . - _kernel_base; + . = _usertext_vbase; + .usertext : AT( gUsertextPhysStart ) + { + *(.usertext) + } + . += gUsertextPhysStart + _kernel_base - _usertext_vbase; + + /* 0x4000 (4 pages) alignment needed for root table */ + .data ALIGN(0x4000) : AT( ADDR(.data) - _kernel_base ) + { + *(.padata) + *(.data*) + + gKernelSymbols = .; + *(KEXPORT) + gKernelSymbolsEnd = .; + + gKernelModules = .; + *(KMODULES) + gKernelModulesEnd = .; + } + .bss : AT( ADDR(.bss) - _kernel_base ) + { + bss_start = .; + *(.bss*) + *(COMMON*) + . 
= ALIGN(0x1000); + *(.pabss) + bss_end = .; + } + gKernelEnd = .; +} diff --git a/KernelLand/Kernel/arch/armv6/main.c b/KernelLand/Kernel/arch/armv6/main.c new file mode 100644 index 00000000..248c17c5 --- /dev/null +++ b/KernelLand/Kernel/arch/armv6/main.c @@ -0,0 +1,96 @@ +/* + * Acess2 + * + * ARM7 Entrypoint + * arch/arm7/main.c + */ +#define DEBUG 0 + +#include +#include + +// === IMPORTS === +extern void Interrupts_Setup(void); +extern void Arch_LoadBootModules(void); +extern void Heap_Install(void); +extern void Threads_Init(void); +extern void System_Init(const char *Commandline); + +// === PROTOTYPES === + int kmain(void); +Uint32 ARMv7_int_HandleSyscalls(Uint32 Num, Uint32 *Args); + +// === CODE === +int kmain(void) +{ + LogF("Acess2 ARMv7 v"EXPAND_STR(KERNEL_VERSION)"\n"); + LogF(" Git Hash %s\n", gsGitHash); + LogF(" Build %i\n", BUILD_NUM); + + MM_SetupPhys(); + + LogF("Heap Setup...\n"); + Heap_Install(); + + LogF("Threads Init...\n"); + Threads_Init(); + + LogF("VFS Init...\n"); + VFS_Init(); + + // Boot modules? + Module_EnsureLoaded("armv7_GIC"); + + // + LogF("Moving to arch-independent init\n"); + #if PLATFORM_is_tegra2 + System_Init("Acess2.armv7.bin /Acess=initrd: -VTerm:Video=Tegra2Vid"); + #else + System_Init("Acess2.armv7.bin /Acess=initrd: -VTerm:Video=PL110"); + #endif +// System_Init("Acess2.armv7.bin /Acess=initrd:"); + //TODO: + LogF("End of kmain(), for(;;) Threads_Sleep();\n"); + for(;;) + Threads_Sleep(); +} + +void Arch_LoadBootModules(void) +{ +} + +Uint32 ARMv7_int_HandleSyscalls(Uint32 Num, Uint32 *Args) +{ + Uint32 ret = -1, err = 0; + Uint32 addr; + ENTER("iNum xArgs[0] xArgs[1] xArgs[2] xArgs[3]", + Num, Args[0], Args[1], Args[2], Args[3] + ); + switch(Num) + { + case 1: +// Log_Debug("ARMv7", "__clear_cache(%p, %p)", Args[0], Args[1]); + // Align + Args[0] &= ~0xFFF; + Args[1] += 0xFFF; Args[1] &= ~0xFFF; + // Invalidate! 
+ for( addr = Args[0]; addr < Args[1]; addr += 0x1000 ) + { + LOG("addr = %p", addr); + __asm__ __volatile__ ( + "mcrlt p15, 0, %0, c7, c5, 1;\n\t" + "mcrlt p15, 0, %0, c7, c6, 1;\n\t" + : + : "r" (addr) + ); + } + ret = 0; + break; + } + Args[0] = ret; // RetLow + Args[1] = 0; // RetHi + Args[2] = err; // Errno + LEAVE('x', ret); + return ret; +} + diff --git a/KernelLand/Kernel/arch/armv6/mm_phys.c b/KernelLand/Kernel/arch/armv6/mm_phys.c new file mode 100644 index 00000000..5e4a2428 --- /dev/null +++ b/KernelLand/Kernel/arch/armv6/mm_phys.c @@ -0,0 +1,60 @@ +/* + * Acess2 + * + * ARM7 Physical Memory Manager + * arch/arm7/mm_phys.c + */ +#define DEBUG 0 + +#include +#include + +#define MM_NUM_RANGES 1 // Single range +#define MM_RANGE_MAX 0 +#define TRACE_ALLOCS 0 + +#define NUM_STATIC_ALLOC 4 + +char gStaticAllocPages[NUM_STATIC_ALLOC][PAGE_SIZE] __attribute__ ((section(".padata"))); +tPAddr gaiStaticAllocPages[NUM_STATIC_ALLOC] = { + (tPAddr)(&gStaticAllocPages[0]) - KERNEL_BASE, + (tPAddr)(&gStaticAllocPages[1]) - KERNEL_BASE, + (tPAddr)(&gStaticAllocPages[2]) - KERNEL_BASE, + (tPAddr)(&gStaticAllocPages[3]) - KERNEL_BASE +}; +extern char gKernelEnd[]; + + +#include + +//#define REALVIEW_LOWRAM_SIZE 0x10000000 +#define REALVIEW_LOWRAM_SIZE (32*1024*1024) + +void MM_SetupPhys(void) +{ + LogF("MM_SetupPhys: ()\n"); + MM_Tpl_InitPhys( REALVIEW_LOWRAM_SIZE/0x1000, NULL ); +} + +int MM_int_GetMapEntry( void *Data, int Index, tPAddr *Start, tPAddr *Length ) +{ + switch(Index) + { + case 0: + *Start = ((tVAddr)&gKernelEnd - KERNEL_BASE + 0xFFF) & ~0xFFF; + *Length = REALVIEW_LOWRAM_SIZE - *Start; + return 1; + default: + return 0; + } +} + +/** + * \brief Takes a physical address and returns the ID of its range + * \param Addr Physical address of page + * \return Range ID from eMMPhys_Ranges + */ +int MM_int_GetRangeID( tPAddr Addr ) +{ + return MM_RANGE_MAX; // ARM doesn't need ranges +} diff --git a/KernelLand/Kernel/arch/armv6/mm_virt.c b/KernelLand/Kernel/arch/armv6/mm_virt.c new file mode 100644 index 00000000..2dc1147e --- /dev/null +++ b/KernelLand/Kernel/arch/armv6/mm_virt.c @@ -0,0 +1,1080 @@ +/* + * Acess2 + * + * ARM7 Virtual Memory Manager + * - arch/arm7/mm_virt.c + */ +#define DEBUG 0 +#include +#include +#include + +#define TRACE_MAPS 0 + +#define AP_KRW_ONLY 1 // Kernel page +#define AP_KRO_ONLY 5 // Kernel RO page +#define AP_RW_BOTH 3 // Standard RW +#define AP_RO_BOTH 7 // COW Page +#define AP_RO_USER 2 // User RO Page +#define PADDR_MASK_LVL1 0xFFFFFC00 + +// === IMPORTS === +extern Uint32 kernel_table0[]; + +// === TYPES === +typedef struct +{ + tPAddr PhysAddr; + Uint8 Size; + Uint8 Domain; + BOOL bExecutable; + BOOL bGlobal; + BOOL bShared; + int AP; +} tMM_PageInfo; + +//#define FRACTAL(table1, addr) ((table1)[ (0xFF8/4*1024) + ((addr)>>20)]) +#define FRACTAL(table1, addr) ((table1)[ (0xFF8/4*1024) + ((addr)>>22)]) +#define USRFRACTAL(addr) (*((Uint32*)(0x7FDFF000) + ((addr)>>22))) +#define TLBIALL() __asm__ __volatile__ ("mcr p15, 0, %0, c8, c7, 0" : : "r" (0)) +#define TLBIMVA(addr) __asm__ __volatile__ ("mcr p15, 0, %0, c8, c7, 1" : : "r" (((addr)&~0xFFF)|1):"memory") +#define DCCMVAC(addr) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 1" : : "r" ((addr)&~0xFFF)) + +// === PROTOTYPES === +void MM_int_GetTables(tVAddr VAddr, Uint32 **Table0, Uint32 **Table1); + int MM_int_AllocateCoarse(tVAddr VAddr, int Domain); + int MM_int_SetPageInfo(tVAddr VAddr, tMM_PageInfo *pi); + int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi); +tVAddr MM_NewUserStack(void); 
+tPAddr MM_AllocateZero(tVAddr VAddr); +tPAddr MM_AllocateRootTable(void); +void MM_int_CloneTable(Uint32 *DestEnt, int Table); +tPAddr MM_Clone(void); +tVAddr MM_NewKStack(int bGlobal); +void MM_int_DumpTableEnt(tVAddr Start, size_t Len, tMM_PageInfo *Info); +//void MM_DumpTables(tVAddr Start, tVAddr End); +void MM_PageFault(Uint32 PC, Uint32 Addr, Uint32 DFSR, int bPrefetch); + +// === GLOBALS === +tPAddr giMM_ZeroPage; + +// === CODE === +int MM_InitialiseVirtual(void) +{ + return 0; +} + +void MM_int_GetTables(tVAddr VAddr, Uint32 **Table0, Uint32 **Table1) +{ + if(VAddr & 0x80000000) { + *Table0 = (void*)&kernel_table0; // Level 0 + *Table1 = (void*)MM_TABLE1KERN; // Level 1 + } + else { + *Table0 = (void*)MM_TABLE0USER; + *Table1 = (void*)MM_TABLE1USER; + } +} + +int MM_int_AllocateCoarse(tVAddr VAddr, int Domain) +{ + Uint32 *table0, *table1; + Uint32 *desc; + tPAddr paddr; + + ENTER("xVAddr iDomain", VAddr, Domain); + + MM_int_GetTables(VAddr, &table0, &table1); + + VAddr &= ~(0x400000-1); // 4MiB per "block", 1 Page + + desc = &table0[ VAddr>>20]; + LOG("desc = %p", desc); + + // table0: 4 bytes = 1 MiB + + LOG("desc[0] = %x", desc[0]); + LOG("desc[1] = %x", desc[1]); + LOG("desc[2] = %x", desc[2]); + LOG("desc[3] = %x", desc[3]); + + if( (desc[0] & 3) != 0 || (desc[1] & 3) != 0 + || (desc[2] & 3) != 0 || (desc[3] & 3) != 0 ) + { + // Error? + LEAVE('i', 1); + return 1; + } + + paddr = MM_AllocPhys(); + if( !paddr ) + { + // Error + LEAVE('i', 2); + return 2; + } + + *desc = paddr | (Domain << 5) | 1; + desc[1] = desc[0] + 0x400; + desc[2] = desc[0] + 0x800; + desc[3] = desc[0] + 0xC00; + + if( VAddr < 0x80000000 ) { + USRFRACTAL(VAddr) = paddr | 0x13; + } + else { + FRACTAL(table1, VAddr) = paddr | 0x13; + } + + // TLBIALL + TLBIALL(); + + memset( (void*)&table1[ (VAddr >> 12) & ~(1024-1) ], 0, 0x1000 ); + + LEAVE('i', 0); + return 0; +} + +int MM_int_SetPageInfo(tVAddr VAddr, tMM_PageInfo *pi) +{ + Uint32 *table0, *table1; + Uint32 *desc; + + ENTER("pVAddr ppi", VAddr, pi); + + MM_int_GetTables(VAddr, &table0, &table1); + + desc = &table0[ VAddr >> 20 ]; + LOG("desc = %p", desc); + + switch(pi->Size) + { + case 12: // Small Page + case 16: // Large Page + LOG("Page"); + if( (*desc & 3) == 0 ) { + MM_int_AllocateCoarse( VAddr, pi->Domain ); + } + desc = &table1[ VAddr >> 12 ]; + LOG("desc (2) = %p", desc); + if( pi->Size == 12 ) + { + // Small page + // - Error if overwriting a large page + if( (*desc & 3) == 1 ) LEAVE_RET('i', 1); + if( pi->PhysAddr == 0 ) { + *desc = 0; + TLBIMVA( VAddr ); + DCCMVAC( (tVAddr) desc ); +// #warning "HACK: TLBIALL" +// TLBIALL(); + LEAVE('i', 0); + return 0; + } + + *desc = (pi->PhysAddr & 0xFFFFF000) | 2; + if(!pi->bExecutable) *desc |= 1; // XN + if(!pi->bGlobal) *desc |= 1 << 11; // nG + if( pi->bShared) *desc |= 1 << 10; // S + *desc |= (pi->AP & 3) << 4; // AP + *desc |= ((pi->AP >> 2) & 1) << 9; // APX + TLBIMVA( VAddr ); +// #warning "HACK: TLBIALL" +// TLBIALL(); + DCCMVAC( (tVAddr) desc ); + LEAVE('i', 0); + return 0; + } + else + { + // Large page + Log_Warning("MMVirt", "TODO: Implement large pages in MM_int_SetPageInfo"); + } + break; + case 20: // Section or unmapped + Log_Warning("MMVirt", "TODO: Implement sections in MM_int_SetPageInfo"); + break; + case 24: // Supersection + // Error if not aligned + if( VAddr & 0xFFFFFF ) { + LEAVE('i', 1); + return 1; + } + if( (*desc & 3) == 0 || ((*desc & 3) == 2 && (*desc & (1 << 18))) ) + { + if( pi->PhysAddr == 0 ) { + *desc = 0; + } + else { + // Apply + *desc = pi->PhysAddr & 0xFF000000; 
+// *desc |= ((pi->PhysAddr >> 32) & 0xF) << 20; +// *desc |= ((pi->PhysAddr >> 36) & 0x7) << 5; + *desc |= 2 | (1 << 18); + } + // TODO: Apply to all entries + Log_Warning("MMVirt", "TODO: Apply changes to all entries of supersections"); + LEAVE('i', 0); + return 0; + } + // TODO: What here? + Log_Warning("MMVirt", "TODO: 24-bit not on supersection?"); + LEAVE('i', 1); + return 1; + } + + LEAVE('i', 1); + return 1; +} + +int MM_int_GetPageInfo(tVAddr VAddr, tMM_PageInfo *pi) +{ + Uint32 *table0, *table1; + Uint32 desc; + +// LogF("MM_int_GetPageInfo: VAddr=%p, pi=%p\n", VAddr, pi); + + MM_int_GetTables(VAddr, &table0, &table1); + + desc = table0[ VAddr >> 20 ]; + +// if( VAddr > 0x90000000) +// LOG("table0 desc(%p) = %x", &table0[ VAddr >> 20 ], desc); + + pi->bExecutable = 1; + pi->bGlobal = 0; + pi->bShared = 0; + pi->AP = 0; + + switch( (desc & 3) ) + { + // 0: Unmapped + case 0: + pi->PhysAddr = 0; + pi->Size = 20; + pi->Domain = 0; + return 1; + + // 1: Coarse page table + case 1: + // Domain from top level table + pi->Domain = (desc >> 5) & 7; + // Get next level + desc = table1[ VAddr >> 12 ]; +// LOG("table1 desc(%p) = %x", &table1[ VAddr >> 12 ], desc); + switch( desc & 3 ) + { + // 0: Unmapped + case 0: + pi->Size = 12; + return 1; + // 1: Large Page (64KiB) + case 1: + pi->Size = 16; + pi->PhysAddr = desc & 0xFFFF0000; + pi->AP = ((desc >> 4) & 3) | (((desc >> 9) & 1) << 2); + pi->bExecutable = !(desc & 0x8000); + pi->bShared = (desc >> 10) & 1; + return 0; + // 2/3: Small page + case 2: + case 3: + pi->Size = 12; + pi->PhysAddr = desc & 0xFFFFF000; + pi->bExecutable = !(desc & 1); + pi->bGlobal = !(desc >> 11); + pi->bShared = (desc >> 10) & 1; + pi->AP = ((desc >> 4) & 3) | (((desc >> 9) & 1) << 2); + return 0; + } + return 1; + + // 2: Section (or Supersection) + case 2: + if( desc & (1 << 18) ) { + // Supersection + pi->PhysAddr = desc & 0xFF000000; + pi->PhysAddr |= (Uint64)((desc >> 20) & 0xF) << 32; + pi->PhysAddr |= (Uint64)((desc >> 5) & 0x7) << 36; + pi->Size = 24; + pi->Domain = 0; // Supersections default to zero + pi->AP = ((desc >> 10) & 3) | (((desc >> 15) & 1) << 2); + return 0; + } + + // Section + pi->PhysAddr = desc & 0xFFF80000; + pi->Size = 20; + pi->Domain = (desc >> 5) & 7; + pi->AP = ((desc >> 10) & 3) | (((desc >> 15) & 1) << 2); + return 0; + + // 3: Reserved (invalid) + case 3: + pi->PhysAddr = 0; + pi->Size = 20; + pi->Domain = 0; + return 2; + } + return 2; +} + +// --- Exports --- +tPAddr MM_GetPhysAddr(const void *Ptr) +{ + tVAddr VAddr = (tPAddr)Ptr; + tMM_PageInfo pi; + if( MM_int_GetPageInfo(VAddr, &pi) ) + return 0; + return pi.PhysAddr | (VAddr & ((1 << pi.Size)-1)); +} + +Uint MM_GetFlags(tVAddr VAddr) +{ + tMM_PageInfo pi; + int ret; + + if( MM_int_GetPageInfo(VAddr, &pi) ) + return 0; + + ret = 0; + + switch(pi.AP) + { + case 0: + break; + case AP_KRW_ONLY: + ret |= MM_PFLAG_KERNEL; + break; + case AP_KRO_ONLY: + ret |= MM_PFLAG_KERNEL|MM_PFLAG_RO; + break; + case AP_RW_BOTH: + break; + case AP_RO_BOTH: + ret |= MM_PFLAG_COW; + break; + case AP_RO_USER: + ret |= MM_PFLAG_RO; + break; + } + + if( pi.bExecutable ) ret |= MM_PFLAG_EXEC; + return ret; +} + +void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask) +{ + tMM_PageInfo pi; + Uint curFlags; + + if( MM_int_GetPageInfo(VAddr, &pi) ) + return ; + + curFlags = MM_GetFlags(VAddr); + if( (curFlags & Mask) == Flags ) + return ; + curFlags &= ~Mask; + curFlags |= Flags; + + if( curFlags & MM_PFLAG_COW ) + pi.AP = AP_RO_BOTH; + else + { + switch(curFlags & (MM_PFLAG_KERNEL|MM_PFLAG_RO) ) + { + 
case 0: + pi.AP = AP_RW_BOTH; break; + case MM_PFLAG_KERNEL: + pi.AP = AP_KRW_ONLY; break; + case MM_PFLAG_RO: + pi.AP = AP_RO_USER; break; + case MM_PFLAG_KERNEL|MM_PFLAG_RO: + pi.AP = AP_KRO_ONLY; break; + } + } + + pi.bExecutable = !!(curFlags & MM_PFLAG_EXEC); + + MM_int_SetPageInfo(VAddr, &pi); +} + +int MM_IsValidBuffer(tVAddr Addr, size_t Size) +{ + tMM_PageInfo pi; + int bUser = 0; + + Size += Addr & (PAGE_SIZE-1); + Addr &= ~(PAGE_SIZE-1); + + if( MM_int_GetPageInfo(Addr, &pi) ) return 0; + Addr += PAGE_SIZE; + + if(pi.AP != AP_KRW_ONLY && pi.AP != AP_KRO_ONLY) + bUser = 1; + + while( Size >= PAGE_SIZE ) + { + if( MM_int_GetPageInfo(Addr, &pi) ) + return 0; + if(bUser && (pi.AP == AP_KRW_ONLY || pi.AP == AP_KRO_ONLY)) + return 0; + Addr += PAGE_SIZE; + Size -= PAGE_SIZE; + } + + return 1; +} + +int MM_Map(tVAddr VAddr, tPAddr PAddr) +{ + tMM_PageInfo pi = {0}; + #if TRACE_MAPS + Log("MM_Map %P=>%p", PAddr, VAddr); + #endif + + pi.PhysAddr = PAddr; + pi.Size = 12; + if(VAddr < USER_STACK_TOP) + pi.AP = AP_RW_BOTH; + else + pi.AP = AP_KRW_ONLY; // Kernel Read/Write + pi.bExecutable = 1; + if( MM_int_SetPageInfo(VAddr, &pi) ) { +// MM_DerefPhys(pi.PhysAddr); + return 0; + } + return pi.PhysAddr; +} + +tPAddr MM_Allocate(tVAddr VAddr) +{ + tMM_PageInfo pi = {0}; + + ENTER("pVAddr", VAddr); + + pi.PhysAddr = MM_AllocPhys(); + if( pi.PhysAddr == 0 ) LEAVE_RET('i', 0); + pi.Size = 12; + if(VAddr < USER_STACK_TOP) + pi.AP = AP_RW_BOTH; + else + pi.AP = AP_KRW_ONLY; + pi.bExecutable = 0; + if( MM_int_SetPageInfo(VAddr, &pi) ) { + MM_DerefPhys(pi.PhysAddr); + LEAVE('i', 0); + return 0; + } + LEAVE('x', pi.PhysAddr); + return pi.PhysAddr; +} + +tPAddr MM_AllocateZero(tVAddr VAddr) +{ + if( !giMM_ZeroPage ) { + giMM_ZeroPage = MM_Allocate(VAddr); + MM_RefPhys(giMM_ZeroPage); + memset((void*)VAddr, 0, PAGE_SIZE); + } + else { + MM_RefPhys(giMM_ZeroPage); + MM_Map(VAddr, giMM_ZeroPage); + } + MM_SetFlags(VAddr, MM_PFLAG_COW, MM_PFLAG_COW); + return giMM_ZeroPage; +} + +void MM_Deallocate(tVAddr VAddr) +{ + tMM_PageInfo pi; + + if( MM_int_GetPageInfo(VAddr, &pi) ) return ; + if( pi.PhysAddr == 0 ) return; + MM_DerefPhys(pi.PhysAddr); + + pi.PhysAddr = 0; + pi.AP = 0; + pi.bExecutable = 0; + MM_int_SetPageInfo(VAddr, &pi); +} + +tPAddr MM_AllocateRootTable(void) +{ + tPAddr ret; + + ret = MM_AllocPhysRange(2, -1); + if( ret & 0x1000 ) { + MM_DerefPhys(ret); + MM_DerefPhys(ret+0x1000); + ret = MM_AllocPhysRange(3, -1); + if( ret & 0x1000 ) { + MM_DerefPhys(ret); + ret += 0x1000; +// Log("MM_AllocateRootTable: Second try not aligned, %P", ret); + } + else { + MM_DerefPhys(ret + 0x2000); +// Log("MM_AllocateRootTable: Second try aligned, %P", ret); + } + } +// else +// Log("MM_AllocateRootTable: Got it in one, %P", ret); + return ret; +} + +void MM_int_CloneTable(Uint32 *DestEnt, int Table) +{ + tPAddr table; + Uint32 *tmp_map; + Uint32 *cur = (void*)MM_TABLE1USER; +// Uint32 *cur = &FRACTAL(MM_TABLE1USER,0); + int i; + + table = MM_AllocPhys(); + if(!table) return ; + + cur += 256*Table; + + tmp_map = MM_MapTemp(table); + + for( i = 0; i < 1024; i ++ ) + { +// Log_Debug("MMVirt", "cur[%i] (%p) = %x", Table*256+i, &cur[Table*256+i], cur[Table*256+i]); + switch(cur[i] & 3) + { + case 0: tmp_map[i] = 0; break; + case 1: + tmp_map[i] = 0; + Log_Error("MMVirt", "TODO: Support large pages in MM_int_CloneTable (%p)", (Table*256+i)*0x1000); + // Large page? 
+ break; + case 2: + case 3: + // Small page + // - If full RW +// Debug("%p cur[%i] & 0x230 = 0x%x", Table*256*0x1000, i, cur[i] & 0x230); + if( (cur[i] & 0x230) == 0x010 ) + { + void *dst, *src; + tPAddr newpage; + newpage = MM_AllocPhys(); + src = (void*)( (Table*256+i)*0x1000 ); + dst = MM_MapTemp(newpage); +// Debug("Taking a copy of kernel page %p (%P)", src, cur[i] & ~0xFFF); + memcpy(dst, src, PAGE_SIZE); + MM_FreeTemp( dst ); + tmp_map[i] = newpage | (cur[i] & 0xFFF); + } + else + { + if( (cur[i] & 0x230) == 0x030 ) + cur[i] |= 0x200; // Set to full RO (Full RO=COW, User RO = RO) + tmp_map[i] = cur[i]; + MM_RefPhys( tmp_map[i] & ~0xFFF ); + } + break; + } + } + MM_FreeTemp( tmp_map ); + + DestEnt[0] = table + 0*0x400 + 1; + DestEnt[1] = table + 1*0x400 + 1; + DestEnt[2] = table + 2*0x400 + 1; + DestEnt[3] = table + 3*0x400 + 1; +} + +tPAddr MM_Clone(void) +{ + tPAddr ret; + Uint32 *new_lvl1_1, *new_lvl1_2, *cur; + Uint32 *tmp_map; + int i; + +// MM_DumpTables(0, KERNEL_BASE); + + ret = MM_AllocateRootTable(); + + cur = (void*)MM_TABLE0USER; + new_lvl1_1 = MM_MapTemp(ret); + new_lvl1_2 = MM_MapTemp(ret+0x1000); + tmp_map = new_lvl1_1; + for( i = 0; i < 0x800-4; i ++ ) + { + // HACK! Ignore the original identity mapping + if( i == 0 && Threads_GetTID() == 0 ) { + tmp_map[0] = 0; + continue; + } + if( i == 0x400 ) + tmp_map = &new_lvl1_2[-0x400]; + switch( cur[i] & 3 ) + { + case 0: tmp_map[i] = 0; break; + case 1: + MM_int_CloneTable(&tmp_map[i], i); + i += 3; // Tables are alocated in blocks of 4 + break; + case 2: + case 3: + Log_Error("MMVirt", "TODO: Support Sections/Supersections in MM_Clone (i=%i)", i); + tmp_map[i] = 0; + break; + } + } + + // Allocate Fractal table + { + int j, num; + tPAddr tmp = MM_AllocPhys(); + Uint32 *table = MM_MapTemp(tmp); + Uint32 sp; + register Uint32 __SP asm("sp"); + + // Map table to last 4MiB of user space + new_lvl1_2[0x3FC] = tmp + 0*0x400 + 1; + new_lvl1_2[0x3FD] = tmp + 1*0x400 + 1; + new_lvl1_2[0x3FE] = tmp + 2*0x400 + 1; + new_lvl1_2[0x3FF] = tmp + 3*0x400 + 1; + + tmp_map = new_lvl1_1; + for( j = 0; j < 512; j ++ ) + { + if( j == 256 ) + tmp_map = &new_lvl1_2[-0x400]; + if( (tmp_map[j*4] & 3) == 1 ) + { + table[j] = tmp_map[j*4] & PADDR_MASK_LVL1;// 0xFFFFFC00; + table[j] |= 0x813; // nG, Kernel Only, Small page, XN + } + else + table[j] = 0; + } + // Fractal + table[j++] = (ret + 0x0000) | 0x813; + table[j++] = (ret + 0x1000) | 0x813; + // Nuke the rest + for( ; j < 1024; j ++ ) + table[j] = 0; + + // Get kernel stack bottom + sp = __SP & ~(MM_KSTACK_SIZE-1); + j = (sp / 0x1000) % 1024; + num = MM_KSTACK_SIZE/0x1000; + +// Log("num = %i, sp = %p, j = %i", num, sp, j); + + // Copy stack pages + for(; num--; j ++, sp += 0x1000) + { + tVAddr page; + void *tmp_page; + + page = MM_AllocPhys(); +// Log("page = %P", page); + table[j] = page | 0x813; + + tmp_page = MM_MapTemp(page); + memcpy(tmp_page, (void*)sp, 0x1000); + MM_FreeTemp( tmp_page ); + } + + MM_FreeTemp( table ); + } + + MM_FreeTemp( new_lvl1_1 ); + MM_FreeTemp( new_lvl1_2 ); + +// Log("MM_Clone: ret = %P", ret); + + return ret; +} + +void MM_ClearUser(void) +{ + int i, j; + const int user_table_count = USER_STACK_TOP / (256*0x1000); + Uint32 *cur = (void*)MM_TABLE0USER; + Uint32 *tab; + +// MM_DumpTables(0, 0x80000000); + +// Log("user_table_count = %i (as opposed to %i)", user_table_count, 0x800-4); + + for( i = 0; i < user_table_count; i ++ ) + { + switch( cur[i] & 3 ) + { + case 0: break; // Already unmapped + case 1: // Sub pages + tab = (void*)(MM_TABLE1USER + 
i*256*sizeof(Uint32)); + for( j = 0; j < 1024; j ++ ) + { + switch( tab[j] & 3 ) + { + case 0: break; // Unmapped + case 1: + Log_Error("MMVirt", "TODO: Support large pages in MM_ClearUser"); + break; + case 2: + case 3: + MM_DerefPhys( tab[j] & ~(PAGE_SIZE-1) ); + break; + } + } + MM_DerefPhys( cur[i] & ~(PAGE_SIZE-1) ); + cur[i+0] = 0; + cur[i+1] = 0; + cur[i+2] = 0; + i += 3; + break; + case 2: + case 3: + Log_Error("MMVirt", "TODO: Implement sections/supersections in MM_ClearUser"); + break; + } + cur[i] = 0; + } + + // Final block of 4 tables are KStack + i = 0x800 - 4; + + // Clear out unused stacks + { + register Uint32 __SP asm("sp"); + int cur_stack_base = ((__SP & ~(MM_KSTACK_SIZE-1)) / PAGE_SIZE) % 1024; + + tab = (void*)(MM_TABLE1USER + i*256*sizeof(Uint32)); + + // First 512 is the Table1 mapping + 2 for Table0 mapping + for( j = 512+2; j < 1024; j ++ ) + { + // Skip current stack + if( j == cur_stack_base ) { + j += (MM_KSTACK_SIZE / PAGE_SIZE) - 1; + continue ; + } + if( !(tab[j] & 3) ) continue; + ASSERT( (tab[j] & 3) == 2 ); + MM_DerefPhys( tab[j] & ~(PAGE_SIZE) ); + tab[j] = 0; + } + } + + +// MM_DumpTables(0, 0x80000000); +} + +void *MM_MapTemp(tPAddr PAddr) +{ + tVAddr ret; + tMM_PageInfo pi; + + for( ret = MM_TMPMAP_BASE; ret < MM_TMPMAP_END - PAGE_SIZE; ret += PAGE_SIZE ) + { + if( MM_int_GetPageInfo(ret, &pi) == 0 ) + continue; + +// Log("MapTemp %P at %p by %p", PAddr, ret, __builtin_return_address(0)); + MM_RefPhys(PAddr); // Counter the MM_Deallocate in FreeTemp + MM_Map(ret, PAddr); + + return (void*)ret; + } + Log_Warning("MMVirt", "MM_MapTemp: All slots taken"); + return 0; +} + +void MM_FreeTemp(void *Ptr) +{ + tVAddr VAddr = (tVAddr)Ptr; + if( VAddr < MM_TMPMAP_BASE || VAddr >= MM_TMPMAP_END ) { + Log_Warning("MMVirt", "MM_FreeTemp: Passed an addr not from MM_MapTemp (%p)", VAddr); + return ; + } + + MM_Deallocate(VAddr); +} + +tVAddr MM_MapHWPages(tPAddr PAddr, Uint NPages) +{ + tVAddr ret; + int i; + tMM_PageInfo pi; + + ENTER("xPAddr iNPages", PAddr, NPages); + + // Scan for a location + for( ret = MM_HWMAP_BASE; ret < MM_HWMAP_END - NPages * PAGE_SIZE; ret += PAGE_SIZE ) + { +// LOG("checking %p", ret); + // Check if there is `NPages` free pages + for( i = 0; i < NPages; i ++ ) + { + if( MM_int_GetPageInfo(ret + i*PAGE_SIZE, &pi) == 0 ) + break; + } + // Nope, jump to after the used page found and try again +// LOG("i = %i, ==? 
%i", i, NPages); + if( i != NPages ) { + ret += i * PAGE_SIZE; + continue ; + } + + // Map the pages + for( i = 0; i < NPages; i ++ ) + MM_Map(ret+i*PAGE_SIZE, PAddr+i*PAGE_SIZE); + // and return + LEAVE('p', ret); + return ret; + } + Log_Warning("MMVirt", "MM_MapHWPages: No space for a %i page block", NPages); + LEAVE('p', 0); + return 0; +} + +tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PAddr) +{ + tPAddr phys; + tVAddr ret; + + phys = MM_AllocPhysRange(Pages, MaxBits); + if(!phys) { + Log_Warning("MMVirt", "No space left for a %i page block (MM_AllocDMA)", Pages); + return 0; + } + + ret = MM_MapHWPages(phys, Pages); + *PAddr = phys; + + return ret; +} + +void MM_UnmapHWPages(tVAddr Vaddr, Uint Number) +{ + Log_Error("MMVirt", "TODO: Implement MM_UnmapHWPages"); +} + +tVAddr MM_NewKStack(int bShared) +{ + tVAddr min_addr, max_addr; + tVAddr addr, ofs; + + if( bShared ) { + min_addr = MM_GLOBALSTACKS; + max_addr = MM_GLOBALSTACKS_END; + } + else { + min_addr = MM_KSTACK_BASE; + max_addr = MM_KSTACK_END; + } + + // Locate a free slot + for( addr = min_addr; addr < max_addr; addr += MM_KSTACK_SIZE ) + { + tMM_PageInfo pi; + if( MM_int_GetPageInfo(addr+MM_KSTACK_SIZE-PAGE_SIZE, &pi) ) break; + } + + // Check for an error + if(addr >= max_addr) { + return 0; + } + + // 1 guard page + for( ofs = PAGE_SIZE; ofs < MM_KSTACK_SIZE; ofs += PAGE_SIZE ) + { + if( MM_Allocate(addr + ofs) == 0 ) + { + while(ofs) + { + ofs -= PAGE_SIZE; + MM_Deallocate(addr + ofs); + } + Log_Warning("MMVirt", "MM_NewKStack: Unable to allocate"); + return 0; + } + } + return addr + ofs; +} + +tVAddr MM_NewUserStack(void) +{ + tVAddr addr, ofs; + + addr = USER_STACK_TOP - USER_STACK_SIZE; + if( MM_GetPhysAddr( (void*)(addr + PAGE_SIZE) ) ) { + Log_Error("MMVirt", "Unable to create initial user stack, addr %p taken", + addr + PAGE_SIZE + ); + return 0; + } + + // 1 guard page + for( ofs = PAGE_SIZE; ofs < USER_STACK_SIZE; ofs += PAGE_SIZE ) + { + tPAddr rv; + if(ofs >= USER_STACK_SIZE - USER_STACK_COMM) + rv = MM_Allocate(addr + ofs); + else + rv = MM_AllocateZero(addr + ofs); + if(rv == 0) + { + while(ofs) + { + ofs -= PAGE_SIZE; + MM_Deallocate(addr + ofs); + } + Log_Warning("MMVirt", "MM_NewUserStack: Unable to allocate"); + return 0; + } + MM_SetFlags(addr+ofs, 0, MM_PFLAG_KERNEL); + } +// Log("Return %p", addr + ofs); +// MM_DumpTables(0, 0x80000000); + return addr + ofs; +} + +void MM_int_DumpTableEnt(tVAddr Start, size_t Len, tMM_PageInfo *Info) +{ + if( giMM_ZeroPage && Info->PhysAddr == giMM_ZeroPage ) + { + Debug("%p => %8s - 0x%7x %i %x %s", + Start, "ZERO", Len, + Info->Domain, Info->AP, + Info->bGlobal ? "G" : "nG" + ); + } + else + { + Debug("%p => %8x - 0x%7x %i %x %s", + Start, Info->PhysAddr-Len, Len, + Info->Domain, Info->AP, + Info->bGlobal ? 
"G" : "nG" + ); + } +} + +void MM_DumpTables(tVAddr Start, tVAddr End) +{ + tVAddr range_start = 0, addr; + tMM_PageInfo pi, pi_old; + int i = 0, inRange=0; + + memset(&pi_old, 0, sizeof(pi_old)); + + Debug("Page Table Dump (%p to %p):", Start, End); + range_start = Start; + for( addr = Start; i == 0 || (addr && addr < End); i = 1 ) + { + int rv; +// Log("addr = %p", addr); + rv = MM_int_GetPageInfo(addr, &pi); + if( rv + || pi.Size != pi_old.Size + || pi.Domain != pi_old.Domain + || pi.AP != pi_old.AP + || pi.bGlobal != pi_old.bGlobal + || pi_old.PhysAddr != pi.PhysAddr ) + { + if(inRange) { + MM_int_DumpTableEnt(range_start, addr - range_start, &pi_old); + } + addr &= ~((1 << pi.Size)-1); + range_start = addr; + } + + pi_old = pi; + // Handle the zero page + if( !giMM_ZeroPage || pi_old.Size != 12 || pi_old.PhysAddr != giMM_ZeroPage ) + pi_old.PhysAddr += 1 << pi_old.Size; + addr += 1 << pi_old.Size; + inRange = (rv == 0); + } + if(inRange) + MM_int_DumpTableEnt(range_start, addr - range_start, &pi); + Debug("Done"); +} + +// NOTE: Runs in abort context, not much difference, just a smaller stack +void MM_PageFault(Uint32 PC, Uint32 Addr, Uint32 DFSR, int bPrefetch) +{ + int rv; + tMM_PageInfo pi; + + rv = MM_int_GetPageInfo(Addr, &pi); + + // Check for COW + if( rv == 0 && pi.AP == AP_RO_BOTH ) + { + pi.AP = AP_RW_BOTH; + if( giMM_ZeroPage && pi.PhysAddr == giMM_ZeroPage ) + { + tPAddr newpage; + newpage = MM_AllocPhys(); + if( !newpage ) { + Log_Error("MMVirt", "Unable to allocate new page for COW of ZERO"); + for(;;); + } + + #if TRACE_COW + Log_Notice("MMVirt", "COW %p caused by %p, ZERO duped to %P (RefCnt(%i)--)", Addr, PC, + newpage, MM_GetRefCount(pi.PhysAddr)); + #endif + + MM_DerefPhys(pi.PhysAddr); + pi.PhysAddr = newpage; + pi.AP = AP_RW_BOTH; + MM_int_SetPageInfo(Addr, &pi); + + memset( (void*)(Addr & ~(PAGE_SIZE-1)), 0, PAGE_SIZE ); + + return ; + } + else if( MM_GetRefCount(pi.PhysAddr) > 1 ) + { + // Duplicate the page + tPAddr newpage; + void *dst, *src; + + newpage = MM_AllocPhys(); + if(!newpage) { + Log_Error("MMVirt", "Unable to allocate new page for COW"); + for(;;); + } + dst = MM_MapTemp(newpage); + src = (void*)(Addr & ~(PAGE_SIZE-1)); + memcpy( dst, src, PAGE_SIZE ); + MM_FreeTemp( dst ); + + #if TRACE_COW + Log_Notice("MMVirt", "COW %p caused by %p, %P duped to %P (RefCnt(%i)--)", Addr, PC, + pi.PhysAddr, newpage, MM_GetRefCount(pi.PhysAddr)); + #endif + + MM_DerefPhys(pi.PhysAddr); + pi.PhysAddr = newpage; + } + #if TRACE_COW + else { + Log_Notice("MMVirt", "COW %p caused by %p, took last reference to %P", + Addr, PC, pi.PhysAddr); + } + #endif + // Unset COW + pi.AP = AP_RW_BOTH; + MM_int_SetPageInfo(Addr, &pi); + return ; + } + + + Log_Error("MMVirt", "Code at %p accessed %p (DFSR = 0x%x)%s", PC, Addr, DFSR, + (bPrefetch ? 
" - Prefetch" : "") + ); + if( Addr < 0x80000000 ) + MM_DumpTables(0, 0x80000000); + else + MM_DumpTables(0x80000000, -1); + for(;;); +} + diff --git a/KernelLand/Kernel/arch/armv6/pci.c b/KernelLand/Kernel/arch/armv6/pci.c new file mode 100644 index 00000000..2e674bbc --- /dev/null +++ b/KernelLand/Kernel/arch/armv6/pci.c @@ -0,0 +1,32 @@ +/* + * + */ +#include +#include + +// Realview +//#define PCI_BASE 0x60000000 + +//#define PCI_BASE 0xF0400000 // VMM Mapping +#define PCI_BASE 0 + +// === CODE === +void PCI_CfgWriteDWord(Uint32 Addr, Uint32 Data) +{ + #if PCI_BASE + Uint32 address = PCI_BASE | Addr; + *(Uint32*)(address) = Data; + #else + #endif +} + +Uint32 PCI_CfgReadDWord(Uint32 Addr) +{ + #if PCI_BASE + Uint32 address = PCI_BASE | Addr; + return *(Uint32*)address; + #else + return 0xFFFFFFFF; + #endif +} + diff --git a/KernelLand/Kernel/arch/armv6/proc.S b/KernelLand/Kernel/arch/armv6/proc.S new file mode 100644 index 00000000..19790583 --- /dev/null +++ b/KernelLand/Kernel/arch/armv6/proc.S @@ -0,0 +1,105 @@ +/* + * Acess2 ARM + * - By John Hodge (thePowersGang) + * + * arch/arm7/proc.S + * - Process management assembly + */ + +#include "include/assembly.h" + +.globl KernelThreadHeader +@ SP+12: Argument 1 +@ SP+8: Argument Count +@ SP+4: Function +@ SP+0: Thread Pointer +KernelThreadHeader: + ldr r0, [sp],#4 + @ TODO: Do something with the thread pointer + + ldr r4, [sp],#4 @ Function + @ Get argument + ldr r0, [sp],#4 + + blx r4 + + ldr r0, =0 + bl Threads_Exit + b . + +.globl SwitchTask +@ R0: New stack +@ R1: Pointer to where to save old stack +@ R2: New IP +@ R3: Pointer to save old IP +@ SP+0: New address space +SwitchTask: + push {r4-r12,lr} + + @ Save IP + ldr r4, =.return + str r4, [r3] + @ Save SP + str sp, [r1] + + @ Only update TTBR0 if the task has an explicit address space + ldr r1, [sp,#4*10] + tst r1, r1 + mcrne p15, 0, r1, c2, c0, 0 @ Set TTBR0 to r0 +# mov r1, #1 + mcrne p15, 0, r1, c8, c7, 0 @ TLBIALL - Invalid user space + + @ Restore state + mov sp, r0 + bx r2 + +.return: + pop {r4-r12,pc} + +.extern MM_Clone +.extern MM_DumpTables +.globl Proc_CloneInt +Proc_CloneInt: + @ R0: SP Destination + @ R1: Mem Destination + push {r4-r12,lr} + mov r4, r1 @ Save mem destination + str sp, [r0] @ Save SP to SP dest + + bl MM_Clone + str r0, [r4] @ Save clone return to Mem Dest + + ldr r0, =Proc_CloneInt_new + pop {r4-r12,pc} +Proc_CloneInt_new: + mov r0, #0 + pop {r4-r12,pc} + +@ R0: New user SP +@ Return: Old user SP +.globl Proc_int_SwapUserSP +Proc_int_SwapUserSP: + cps #31 @ Go to system mode + mov r1, sp + tst r0, r0 @ Only update if non-zero + movne sp, r0 + mov r0, r1 + cps #19 + mov pc, lr + +.section .usertext, "ax" +.globl Proc_int_DropToUser +@ R0: User IP +@ R1: User SP +Proc_int_DropToUser: + cps #16 + mov sp, r1 + mov pc, r0 + +.section .rodata +csProc_CloneInt_NewTaskMessage: + .asciz "New task PC=%p, R4=%p, sp=%p" +csProc_CloneInt_OldTaskMessage: + .asciz "Parent task PC=%p, R4=%p, SP=%p" + +@ vim: ft=armv7 diff --git a/KernelLand/Kernel/arch/armv6/proc.c b/KernelLand/Kernel/arch/armv6/proc.c new file mode 100644 index 00000000..cd998f2b --- /dev/null +++ b/KernelLand/Kernel/arch/armv6/proc.c @@ -0,0 +1,235 @@ +/* + * Acess2 + * - By John Hodge (thePowersGang) + * + * arch/arm7/proc.c + * - ARM7 Process Switching + */ +#include +#include +#include + +// === IMPORTS === +extern tThread gThreadZero; +extern tProcess gProcessZero; +extern void SwitchTask(Uint32 NewSP, Uint32 *OldSP, Uint32 NewIP, Uint32 *OldIP, Uint32 MemPtr); +extern void 
KernelThreadHeader(void); // Actually takes args on stack +extern void Proc_int_DropToUser(Uint32 IP, Uint32 SP) NORETURN __attribute__((long_call)); +extern Uint32 Proc_int_SwapUserSP(Uint32 NewSP); +extern Uint32 Proc_CloneInt(Uint32 *SP, Uint32 *MemPtr); +extern tVAddr MM_NewKStack(int bGlobal); // TODO: Move out into a header +extern tVAddr MM_NewUserStack(void); +extern char kernel_table0[]; + +// === PROTOTYPES === +void Proc_IdleThread(void *unused); + +// === GLOBALS === +tThread *gpCurrentThread = &gThreadZero; +tThread *gpIdleThread = NULL; + +// === CODE === +void ArchThreads_Init(void) +{ + gProcessZero.MemState.Base = (tPAddr)&kernel_table0 - KERNEL_BASE; +} + +void Proc_IdleThread(void *unused) +{ + Threads_SetPriority(gpIdleThread, -1); + for(;;) { + Proc_Reschedule(); + __asm__ __volatile__ ("wfi"); + } +} + +void Proc_Start(void) +{ + tTID tid; + + tid = Proc_NewKThread( Proc_IdleThread, NULL ); + gpIdleThread = Threads_GetThread(tid); + gpIdleThread->ThreadName = (char*)"Idle Thread"; +} + +int GetCPUNum(void) +{ + return 0; +} + +tThread *Proc_GetCurThread(void) +{ + return gpCurrentThread; +} + +void Proc_StartUser(Uint Entrypoint, Uint Base, int ArgC, const char **ArgV, int DataSize) +{ + Uint32 *usr_sp; + int i; + const char **envp; + tVAddr delta; + +// Log_Debug("Proc", "Proc_StartUser: (Entrypoint=%p, Base=%p, ArgC=%i, ArgV=%p, DataSize=0x%x)", +// Entrypoint, Base, ArgC, ArgV, DataSize); + + // Write data to the user's stack + usr_sp = (void*)MM_NewUserStack(); + usr_sp -= (DataSize+3)/4; + memcpy(usr_sp, ArgV, DataSize); + free(ArgV); + + // Adjust user's copy of the arguments + delta = (tVAddr)usr_sp - (tVAddr)ArgV; + ArgV = (void*)usr_sp; + for(i = 0; ArgV[i]; i ++) ArgV[i] += delta; + envp = &ArgV[i+1]; + for(i = 0; envp[i]; i ++) envp[i] += delta; + + *--usr_sp = (Uint32)envp; + *--usr_sp = (Uint32)ArgV; + *--usr_sp = (Uint32)ArgC; + *--usr_sp = Base; + + // Drop to user code + Log_Debug("Proc", "Proc_int_DropToUser(%p, %p)", Entrypoint, usr_sp); + Proc_int_DropToUser(Entrypoint, (Uint32)usr_sp); +} + +void Proc_ClearProcess(tProcess *Process) +{ + Log_Warning("Proc", "TODO: Nuke address space etc"); +} + +void Proc_ClearThread(tThread *Thread) +{ +} + +tTID Proc_Clone(Uint Flags) +{ + tThread *new; + Uint32 pc, sp, mem; + + new = Threads_CloneTCB(Flags); + if(!new) return -1; + + // Actual clone magic + pc = Proc_CloneInt(&sp, &mem); + if(pc == 0) { + Log("Proc_Clone: In child"); + return 0; + } + + new->SavedState.IP = pc; + new->SavedState.SP = sp; + new->SavedState.UserSP = Proc_int_SwapUserSP(0); + new->SavedState.UserIP = Proc_GetCurThread()->SavedState.UserIP; + new->Process->MemState.Base = mem; + + Threads_AddActive(new); + + return new->TID; +} + +int Proc_SpawnWorker( void (*Fnc)(void*), void *Ptr ) +{ + tThread *new; + Uint32 sp; + + new = Threads_CloneThreadZero(); + if(!new) return -1; + if(new->ThreadName) free(new->ThreadName); + new->ThreadName = NULL; + + new->KernelStack = MM_NewKStack(1); + if(!new->KernelStack) { + // TODO: Delete thread + Log_Error("Proc", "Unable to allocate kernel stack"); + return -1; + } + + sp = new->KernelStack; + + *(Uint32*)(sp -= 4) = (Uint)Ptr; + *(Uint32*)(sp -= 4) = (Uint)Fnc; + *(Uint32*)(sp -= 4) = (Uint)new; + + new->SavedState.SP = sp; + new->SavedState.IP = (Uint)KernelThreadHeader; + + Threads_AddActive(new); + + return new->TID; +} + +tTID Proc_NewKThread( void (*Fnc)(void*), void *Ptr ) +{ + tThread *new; + Uint32 sp; + + new = Threads_CloneTCB(0); + if(!new) return -1; + free(new->ThreadName); + 
new->ThreadName = NULL; + + // TODO: Non-shared stack + new->KernelStack = MM_NewKStack(1); + if(!new->KernelStack) { + // TODO: Delete thread + Log_Error("Proc", "Unable to allocate kernel stack"); + return -1; + } + + sp = new->KernelStack; + + *(Uint32*)(sp -= 4) = (Uint)Ptr; + *(Uint32*)(sp -= 4) = (Uint)Fnc; + *(Uint32*)(sp -= 4) = (Uint)new; + + new->SavedState.SP = sp; + new->SavedState.IP = (Uint)KernelThreadHeader; + + Threads_AddActive(new); + + return new->TID; +} + +void Proc_CallFaultHandler(tThread *Thread) +{ + +} + +void Proc_Reschedule(void) +{ + tThread *cur, *next; + + cur = gpCurrentThread; + + next = Threads_GetNextToRun(0, cur); + if(!next) next = gpIdleThread; + if(!next || next == cur) return; + + Log("Switching to %p (%i %s) IP=%p SP=%p TTBR0=%p UsrSP=%p", + next, next->TID, next->ThreadName, + next->SavedState.IP, next->SavedState.SP, next->Process->MemState.Base, + next->SavedState.UserSP + ); + + Log("Requested by %p", __builtin_return_address(0)); + + gpCurrentThread = next; + + cur->SavedState.UserSP = Proc_int_SwapUserSP( next->SavedState.UserSP ); + + SwitchTask( + next->SavedState.SP, &cur->SavedState.SP, + next->SavedState.IP, &cur->SavedState.IP, + next->Process->MemState.Base + ); + +} + +void Proc_DumpThreadCPUState(tThread *Thread) +{ + +} + diff --git a/KernelLand/Kernel/arch/armv6/start.S b/KernelLand/Kernel/arch/armv6/start.S new file mode 100644 index 00000000..4be97676 --- /dev/null +++ b/KernelLand/Kernel/arch/armv6/start.S @@ -0,0 +1,370 @@ + +#include "include/assembly.h" +#include "include/options.h" + +@ +@ Exception defs taken from ARM DDI 0406B +@ +.section .init +interrupt_vector_table: +ivt_reset: b _start @ 0x00 Reset +ivt_undef: b Undef_Handler @ 0x04 #UD +ivt_svc: b SVC_Handler @ 0x08 SVC (used to be called SWI) +ivt_prefetch: b PrefetchAbort @ 0x0C Prefetch abort +ivt_data: b DataAbort @ 0x10 Data abort +ivt_unused: b . @ 0x14 Not Used +ivt_irq: b IRQHandler @ 0x18 IRQ +ivt_fiq: b . 
@ 0x1C FIQ (Fast interrupt) + +.globl _start +_start: + ldr r2, =UART0_PADDR + mov r1, #'A' + str r1, [r2] + + ldr r0, =kernel_table0-KERNEL_BASE + mcr p15, 0, r0, c2, c0, 1 @ Set TTBR1 to r0 + mcr p15, 0, r0, c2, c0, 0 @ Set TTBR0 to r0 too (for identity) + + mov r1, #'c' + str r1, [r2] + + mov r0, #1 + mcr p15, 0, r0, c2, c0, 2 @ Set TTCR to 1 (50/50 split) + + mov r1, #'e' + str r1, [r2] + + mov r0, #3 + mcr p15, 0, r0, c3, c0, 0 @ Set Domain 0 to Manager + + mov r1, #'s' + str r1, [r2] + + @ Enable VMSA + mrc p15, 0, r0, c1, c0, 0 + orr r0, r0, #1 + orr r0, r0, #1 << 23 + mcr p15, 0, r0, c1, c0, 0 + + @ HACK: Set ASID to non zero + mov r0, #1 + MCR p15,0,r0,c13,c0,1 + + ldr r2, =0xF1000000 + mov r1, #'s' + str r1, [r2] + + @ Enable access faults on domains 0 & 1 + mov r0, #0x55 @ 01010101b + mcr p15, 0, r0, c3, c0, 0 + + mov r1, #'2' + str r1, [r2] + + @ + @ Check for security extensions + @ + mrc p15, 0, r0, c0, c1, 1 + and r0, #0xF0 + @ - Present + ldrne r0,=KERNEL_BASE + mcrne p15, 0, r0, c12, c0, 0 @ Set the VBAR (brings exceptions into high memory) + @ - Absent + mrceq p15, 0, r0, c1, c0, 0 @ Set SCTLR.V + orreq r0, #0x2000 + mcreq p15, 0, r0, c1, c0, 0 + + mov r1, #'-' + str r1, [r2] + + @ Prepare for interrupts + cps #18 @ IRQ Mode + ldr sp, =irqstack+0x1000 @ Set up stack + cps #23 @ Abort Mode + ldr sp, =abortstack+0x1000 + cps #19 + + mov r1, #'a' + str r1, [r2] + mov r1, #'r' + str r1, [r2] + mov r1, #'m' + str r1, [r2] + mov r1, #13 + str r1, [r2] + mov r1, #10 + str r1, [r2] + +.extern bss_start +.extern bss_size_div_4 +.zero_bss: + ldr r0, =bss_start + ldr r1, =bss_end + mov r3, #0 +.zero_bss_loop: + str r3, [r0],#4 + cmp r0, r1 + bls .zero_bss_loop + +.goto_c: + ldr sp, =0x80000000-8 @ Set up stack (top of user range) + ldr r0, =kmain + mov pc, r0 +1: b 1b @ Infinite loop + +.comm irqstack, 0x1000 @ ; 4KiB Stack +.comm abortstack, 0x1000 @ ; 4KiB Stack + +.extern SyscallHandler +SVC_Handler: +@ sub lr, #4 + srsdb sp!, #19 @ Save state to stack + cpsie ifa, #19 @ Ensure we're in supervisor with interrupts enabled (should already be there) + push {r0-r12} + + ldr r4, [lr,#-4] + mvn r5, #0xFF000000 + and r4, r5 + + tst r4, #0x1000 + bne .arm_specifics + + push {r4} + + mov r0, sp + ldr r4, =SyscallHandler + blx r4 + +@ ldr r0, =csSyscallPrintRetAddr +@ ldr r1, [sp,#9*4+5*4] +@ ldr r4, =Log +@ blx r4 + + pop {r2} @ errno + pop {r0,r1} @ Ret/RetHi + add sp, #2*4 @ Saved r2/r3 + + pop {r4-r12} + rfeia sp! @ Pop state (actually RFEFD) +.arm_specifics: + and r4, #0xFF + mov r0, r4 @ Number + mov r1, sp @ Arguments + + ldr r4, =ARMv7_int_HandleSyscalls + blx r4 + + add sp, #4*4 + pop {r4-r12} + rfeia sp! + + +.globl gpIRQHandler +gpIRQHandler: .long 0 +IRQ_saved_sp: .long 0 +IRQ_saved_lr: .long 0 +.globl IRQHandler +IRQHandler: + sub lr, #4 @ Adjust LR to the correct value + srsdb sp!, #19 @ Switch to supervisor mode (DDI0406B D1.6.5) (actually SRSFD) + cps #19 + + PUSH_GPRS + +@ ldr r0, =csIRQ_Tag +@ ldr r1, =csIRQ_Fmt +@ ldr r4, =Log_Debug +@ blx r4 + + @ Call the registered handler + ldr r0, gpIRQHandler + blx r0 + + @ Restore CPU state + POP_GPRS + cpsie i + rfeia sp! 
@ Pop state (actually RFEFD) + bx lr + +.globl DataAbort +DataAbort: + sub lr, #8 @ Adjust LR to the correct value + srsdb sp!, #23 @ Switch to supervisor mode (DDI0406B D1.6.5) (actually SRSFD) +@ cpsid ifa, #19 + PUSH_GPRS + + mov r3, #0 @ not a prefetch abort + mrc p15, 0, r2, c5, c0, 0 @ Read DFSR (Data Fault Status Register) to R2 + mrc p15, 0, r1, c6, c0, 0 @ Read DFAR (Data Fault Address Register) into R1 + mov r0, lr @ PC + ldr r4, =MM_PageFault + blx r4 + + POP_GPRS + rfeia sp! @ Pop state (actually RFEFD) + +.globl PrefetchAbort +PrefetchAbort: + sub lr, #4 @ Adjust LR to the correct value + srsdb sp!, #23 @ Switch to supervisor mode (DDI0406B D1.6.5) (actually SRSFD) +@ cpsid ifa, #19 + PUSH_GPRS + + ldr r0, =csAbort_Tag + ldr r1, =csPrefetchAbort_Fmt +# mov r2, lr + mrc p15, 0, r2, c6, c0, 2 @ Read IFAR (Instruction Fault Address Register) into R3 + mrc p15, 0, r3, c5, c0, 1 @ Read IFSR (Instruction Fault Status Register) into R3 + ldr r5, =Log_Error + blx r5 + +.loop: + wfi + b .loop +.globl Undef_Handler +Undef_Handler: + wfi + b Undef_Handler + +.globl abort +abort: + wfi + b abort + + +.section .rodata +csIRQ_Tag: +csAbort_Tag: + .asciz "ARMv7" +csIRQ_Fmt: + .asciz "IRQ" +csDataAbort_Fmt: + .asciz "Data Abort - %p accessed %p, DFSR=%x Unk:%x Unk:%x" +csPrefetchAbort_Fmt: + .asciz "Prefetch Abort at %p, IFSR=%x" +csSyscallPrintRetAddr: + .asciz "Syscall ret to %p" + +.section .padata +.globl kernel_table0 + +kernel_table0: + .long 0x00000402 @ Identity map the first 1 MiB + .rept 0x7FC - 1 + .long 0 + .endr + .long user_table1_map + 0x000 - KERNEL_BASE + 1 @ 0x7FC00000 + .long user_table1_map + 0x400 - KERNEL_BASE + 1 @ 0x7FD00000 + .long user_table1_map + 0x800 - KERNEL_BASE + 1 @ KStacks + .long user_table1_map + 0xC00 - KERNEL_BASE + 1 + @ 0x80000000 - User/Kernel split + .long 0x00000402 @ Map first 4 MiB to 2GiB (KRW only) + .long 0x00100402 @ + .long 0x00200402 @ + .long 0x00300402 @ + .rept 0xF00 - 0x800 - 4 + .long 0 + .endr +#if PCI_PADDR + .long PCI_PADDR + 0*(1 << 20) + 0x402 @ Map PCI config space + .long PCI_PADDR + 1*(1 << 20) + 0x402 + .long PCI_PADDR + 2*(1 << 20) + 0x402 + .long PCI_PADDR + 3*(1 << 20) + 0x402 + .long PCI_PADDR + 4*(1 << 20) + 0x402 + .long PCI_PADDR + 5*(1 << 20) + 0x402 + .long PCI_PADDR + 6*(1 << 20) + 0x402 + .long PCI_PADDR + 7*(1 << 20) + 0x402 + .long PCI_PADDR + 8*(1 << 20) + 0x402 + .long PCI_PADDR + 9*(1 << 20) + 0x402 + .long PCI_PADDR + 10*(1 << 20) + 0x402 + .long PCI_PADDR + 11*(1 << 20) + 0x402 + .long PCI_PADDR + 12*(1 << 20) + 0x402 + .long PCI_PADDR + 13*(1 << 20) + 0x402 + .long PCI_PADDR + 14*(1 << 20) + 0x402 + .long PCI_PADDR + 15*(1 << 20) + 0x402 +#else + .rept 16 + .long 0 + .endr +#endif + .long hwmap_table_0 + 0x000 - KERNEL_BASE + 1 + .long hwmap_table_0 + 0x400 - KERNEL_BASE + 1 + .long hwmap_table_0 + 0x800 - KERNEL_BASE + 1 + .long hwmap_table_0 + 0xC00 - KERNEL_BASE + 1 + .rept 0xFF8 - 0xF00 - 16 - 4 + .long 0 + .endr + @ Page fractals + .long kernel_table1_map + 0x000 - KERNEL_BASE + 1 + .long kernel_table1_map + 0x400 - KERNEL_BASE + 1 + .long kernel_table1_map + 0x800 - KERNEL_BASE + 1 + .long kernel_table1_map + 0xC00 - KERNEL_BASE + 1 + .long kernel_exception_map + 0x000 - KERNEL_BASE + 1 + .long kernel_exception_map + 0x400 - KERNEL_BASE + 1 + .long kernel_exception_map + 0x800 - KERNEL_BASE + 1 + .long kernel_exception_map + 0xC00 - KERNEL_BASE + 1 + +@ PID0 user table +.globl user_table1_map +@ User table1 data table (only the first half is needed) +@ - Abused to provide kernel stacks in the unused 
half of the table
+user_table1_map:	@ Size = 4KiB (only 2KiB used)
+	.rept 0x800/4-1
+	.long 0
+	.endr
+	.long user_table1_map - KERNEL_BASE + 0x13	@ ...1FF000 = 0x7FDFF000
+	@ Kernel stack zone
+	.long kernel_table0 + 0x0000 - KERNEL_BASE + 0x13	@ ...200000 = 0x7FE00000
+	.long kernel_table0 + 0x1000 - KERNEL_BASE + 0x13	@ ...201000 = 0x7FE01000
+	.rept (0x800/4)-(MM_KSTACK_SIZE/0x1000)-2
+	.long 0
+	.endr
+	#if MM_KSTACK_SIZE != 0x2000
+	#error Kernel stack size not changed in start.S
+	#endif
+	.long stack + 0x0000 - KERNEL_BASE + 0x13	@ Kernel Stack
+	.long stack + 0x1000 - KERNEL_BASE + 0x13	@
+
+.globl kernel_table1_map
+kernel_table1_map:	@ Size = 4KiB
+	.rept (0xF00+16)/4
+	.long 0
+	.endr
+	.long hwmap_table_0 - KERNEL_BASE + 0x13
+	.rept 0xFF8/4 - (0xF00+16)/4 - 1
+	.long 0
+	.endr
+	.long kernel_table1_map - KERNEL_BASE + 0x13
+	.long kernel_exception_map - KERNEL_BASE + 0x13
+
+@ Hardware mappings
+.globl hwmap_table_0
+hwmap_table_0:
+	.long UART0_PADDR + 0x13	@ UART0
+	.rept 1024 - 1
+	.long 0
+	.endr
+.globl kernel_exception_map
+kernel_exception_map:
+	@ Padding
+	.rept 1024-256
+	.long 0
+	.endr
+	@ Align to nearly the end
+	.rept 256-16
+	.long 0
+	.endr
+	.long 0x212	@ Map first page for exceptions (Kernel RO, Execute)
+	.rept 16-1-2
+	.long 0
+	.endr
+	.long gUsertextPhysStart + 0x22	@ User .text (User RO, Kernel RW, because both is COW)
+	.long 0
+
+.section .padata
+stack:
+	.space MM_KSTACK_SIZE, 0	@ Original kernel stack
+
+// vim: ts=8 ft=armv7
+
diff --git a/KernelLand/Kernel/arch/armv6/time.c b/KernelLand/Kernel/arch/armv6/time.c
new file mode 100644
index 00000000..d4ae4fa6
--- /dev/null
+++ b/KernelLand/Kernel/arch/armv6/time.c
@@ -0,0 +1,16 @@
+/*
+ * Acess2
+ *
+ * ARMv6 Time code
+ * arch/armv6/time.c
+ */
+#include <acess.h>
+
+// === GLOBALS ===
+tTime giTimestamp;
+
+// === CODE ===
+tTime now(void)
+{
+	return giTimestamp;
+}
-- 
2.20.1
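
Proc_StartUser() in the proc.c hunk above copies the packed argv/envp block onto the new user stack and then shifts every pointer inside it by the distance the block moved (delta); envp is fixed up the same way as argv. The following hosted-C model shows only that relocation step, using local buffers in place of the real user stack; it is a sketch, not kernel code.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Packed block as the program loader would hand it over:
	 * pointer array first, the strings it points at straight after. */
	_Alignas(char *) char src_block[64];
	char **src_argv = (char **)src_block;
	char *strings = src_block + 3 * sizeof(char *);
	strcpy(strings, "init");
	src_argv[0] = strings;   /* argv[0] */
	src_argv[1] = NULL;      /* argv terminator */
	src_argv[2] = NULL;      /* envp terminator */
	size_t data_size = (size_t)((strings + 5) - src_block);

	/* "User stack": copy the block near the top, word-rounded as in the patch */
	_Alignas(char *) char user_stack[128];
	char *dst = user_stack + sizeof user_stack - ((data_size + 3) & ~(size_t)3);
	memcpy(dst, src_block, data_size);

	/* Shift every pointer by how far the block moved */
	intptr_t delta = (intptr_t)dst - (intptr_t)src_block;
	char **argv = (char **)dst;
	for (int i = 0; argv[i]; i++)
		argv[i] += delta;

	printf("argv[0] after relocation: %s\n", argv[0]);
	return 0;
}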
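
The initial kernel stacks built by Proc_NewKThread() and Proc_SpawnWorker() above push three words (argument, entry function, thread pointer) so that the first switch into the thread lands in KernelThreadHeader with everything it needs already on the stack. The sketch below models that layout in hosted C; prime_kstack() and demo_entry() are illustrative names, not part of the patch.

#include <stdint.h>
#include <stdio.h>

typedef void (*tEntry)(void *);

/* Same push order as the patch: argument, then entry point, then thread,
 * leaving SP at the thread pointer.  That SP becomes SavedState.SP while
 * SavedState.IP is pointed at KernelThreadHeader. */
static uintptr_t prime_kstack(uintptr_t sp, void *thread, tEntry fn, void *arg)
{
	*(uintptr_t *)(sp -= sizeof(uintptr_t)) = (uintptr_t)arg;
	*(uintptr_t *)(sp -= sizeof(uintptr_t)) = (uintptr_t)fn;
	*(uintptr_t *)(sp -= sizeof(uintptr_t)) = (uintptr_t)thread;
	return sp;
}

static void demo_entry(void *arg)
{
	printf("entry called with \"%s\"\n", (const char *)arg);
}

int main(void)
{
	uintptr_t stack[32];                      /* stands in for MM_NewKStack() */
	uintptr_t sp = (uintptr_t)(stack + 32);   /* stacks grow down from the top */
	int fake_thread;

	sp = prime_kstack(sp, &fake_thread, demo_entry, (void *)"hello");

	/* What KernelThreadHeader would pop, lowest address first */
	uintptr_t *frame = (uintptr_t *)sp;
	printf("thread=%p entry=%p arg=%p\n",
	       (void *)frame[0], (void *)frame[1], (void *)frame[2]);
	((tEntry)frame[1])((void *)frame[2]);
	return 0;
}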
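
Proc_Reschedule() above asks the scheduler for the next runnable thread, falls back to the idle thread, and skips the switch when nothing would change, before handing the saved SP/IP and the TTBR0 base to SwitchTask. A minimal sketch of that decision logic follows, with tThread reduced to a name and get_next() standing in for Threads_GetNextToRun(); both helpers are illustrative.

#include <stddef.h>
#include <stdio.h>

typedef struct { const char *Name; } tThread;

/* Stand-in for Threads_GetNextToRun(): first runnable thread that is not
 * the current one, or NULL when the queue is empty. */
static tThread *get_next(tThread *queue[], size_t n, tThread *cur)
{
	for (size_t i = 0; i < n; i++)
		if (queue[i] && queue[i] != cur)
			return queue[i];
	return NULL;
}

static tThread *pick(tThread *queue[], size_t n, tThread *cur, tThread *idle)
{
	tThread *next = get_next(queue, n, cur);
	if (!next) next = idle;                 /* nothing runnable: run the idle thread */
	if (!next || next == cur) return NULL;  /* no switch needed */
	return next;                            /* caller would now call SwitchTask() */
}

int main(void)
{
	tThread idle = { "Idle Thread" }, worker = { "worker" }, cur = { "current" };
	tThread *queue[] = { &worker };

	tThread *next = pick(queue, 1, &cur, &idle);
	printf("switch to: %s\n", next ? next->Name : "(stay on current)");
	return 0;
}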
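
SVC_Handler in start.S above recovers the syscall number by loading the word at LR-4 (the SVC instruction itself) and masking away the condition/opcode byte, leaving the 24-bit immediate; the 0x1000 bit then routes "ARM specific" calls. A hosted-C sketch of that decode, assuming ARM (not Thumb) state; svc_number() is an illustrative helper.

#include <stdint.h>
#include <stdio.h>

#define SVC_ARM_SPECIFIC  0x1000   /* routing bit tested by the handler */

static uint32_t svc_number(const uint32_t *return_addr)
{
	uint32_t instr = return_addr[-1];      /* word at LR - 4 */
	return instr & 0x00FFFFFF;             /* 24-bit SVC immediate */
}

int main(void)
{
	/* 0xEF000000 | imm24 encodes "svc #imm24" (condition AL) in ARM state */
	uint32_t code[1] = { 0xEF000000 | 0x1042 };
	uint32_t num = svc_number(&code[1]);   /* pretend LR points just past the SVC */

	if (num & SVC_ARM_SPECIFIC)
		printf("ARM-specific call %u\n", num & 0xFF);
	else
		printf("generic syscall %u\n", num);
	return 0;
}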
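
The kernel_table0 entries above are ARMv6 first-level descriptors: values such as 0x00000402 are 1 MiB sections (physical base in bits[31:20], AP=01 for privileged-only read/write, domain 0, type 0b10), while the "+ 1" entries are coarse page-table pointers (type 0b01, base in bits[31:10]). The sketch below rebuilds those constants from their fields; l1_section() and l1_coarse() are illustrative names and the bit layout is the standard VMSA one, not something defined by this patch.

#include <stdint.h>
#include <stdio.h>

static uint32_t l1_section(uint32_t phys, unsigned ap, unsigned domain)
{
	return (phys & 0xFFF00000u)    /* 1 MiB-aligned physical base */
	     | ((uint32_t)ap << 10)    /* AP[1:0] */
	     | ((uint32_t)domain << 5) /* domain number */
	     | 0x2u;                   /* descriptor type: section */
}

static uint32_t l1_coarse(uint32_t table_phys)
{
	return (table_phys & 0xFFFFFC00u) | 0x1u;   /* coarse page-table pointer */
}

int main(void)
{
	/* Reproduces the 0x00000402 / 0x00100402 identity and kernel section entries */
	printf("section 0MiB : 0x%08X\n", l1_section(0x00000000, 1, 0));
	printf("section 1MiB : 0x%08X\n", l1_section(0x00100000, 1, 0));
	printf("coarse  ptr  : 0x%08X\n", l1_coarse(0x00008000));
	return 0;
}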
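
The time.c stub above returns giTimestamp, but nothing in this patch ever advances it, so now() would stay at zero until a platform timer is wired up. Below is a hosted-C sketch of the missing piece, assuming tTime is a millisecond count; Timer_TickHandler() and TICK_PERIOD_MS are hypothetical names for whatever the platform timer interrupt code ends up providing.

#include <stdint.h>
#include <stdio.h>

typedef int64_t tTime;          /* assumed: millisecond timestamp */

#define TICK_PERIOD_MS 10       /* assumed 100 Hz periodic timer */

static volatile tTime giTimestamp;

/* Would be called from the platform timer IRQ on each tick */
static void Timer_TickHandler(void)
{
	giTimestamp += TICK_PERIOD_MS;
}

static tTime now(void)
{
	return giTimestamp;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		Timer_TickHandler();
	printf("now() = %lld ms\n", (long long)now());
	return 0;
}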