From 1c1fce9b136d6df62852ca105428cc3d8b7b1769 Mon Sep 17 00:00:00 2001
From: Curle
Date: Fri, 11 Dec 2020 20:51:12 +0000
Subject: [PATCH] More attempts to fix paging. Still missing the stack.

---
 chroma/inc/kernel/boot/boot.h     | 25 +++++++++
 chroma/inc/kernel/system/memory.h | 11 ++++
 chroma/system/memory/paging.c     | 84 +++++++++++++++++++++----------
 3 files changed, 93 insertions(+), 27 deletions(-)

diff --git a/chroma/inc/kernel/boot/boot.h b/chroma/inc/kernel/boot/boot.h
index 2607484..2a66234 100644
--- a/chroma/inc/kernel/boot/boot.h
+++ b/chroma/inc/kernel/boot/boot.h
@@ -11,6 +11,31 @@
 extern "C" {
 #endif
 
+/*
+;* Memory map
+;*     0h -   600h reserved for the system
+;*   600h -   800h stage1 (MBR/VBR, boot.bin)
+;*   800h -  6C00h stage2 (this)
+;*  6C00h -  7C00h stack (7000h - 700Fh SMP trampoline code)
+;*  8000h -  9000h bootboot structure
+;*  9000h -  A000h environment
+ * V----------- CAN REMOVE -----------V
+;*  A000h -  B000h disk buffer / PML4
+;*  B000h -  C000h PDPE, higher half core 4K slots
+;*  C000h -  D000h PDE 4K
+;*  D000h -  E000h PTE 4K
+;*  E000h -  F000h PDPE, 4G physical RAM identity mapped 2M
+;*  F000h - 10000h PDE 2M
+;* 10000h - 11000h PDE 2M
+;* 11000h - 12000h PDE 2M
+;* 12000h - 13000h PDE 2M
+;* 13000h - 14000h PTE 4K
+ * ∧----------- CAN REMOVE -----------∧
+;* 14000h - 9F000h core stacks (1k per core)
+;*
+;* At first big enough free hole, initrd. Usually at 1Mbyte.
+*/
+
 /* ELF headers hate me.
  * Let's do this the hard way.
  */
diff --git a/chroma/inc/kernel/system/memory.h b/chroma/inc/kernel/system/memory.h
index 589c57f..f621b78 100644
--- a/chroma/inc/kernel/system/memory.h
+++ b/chroma/inc/kernel/system/memory.h
@@ -112,6 +112,17 @@
 #define FB_REGION 0xFFFFFFFFFC000000ull // Cannot move!
 #define FB_PHYSICAL 0x00000000FD000000ull // Physical location of the Framebuffer
 #define KERNEL_REGION 0xFFFFFFFFFFE00000ull // -2MiB, from bootloader
+#define KERNEL_TEXT 0x0000000000002000ull // Offset of the kernel text (and its symbols) from the kernel's physical base
+
+#define KERNEL_PHYSICAL KernelLocation // The located kernel from the bootstrap process
+#define KERNEL_END (KernelLocation + (KernelEnd - KernelAddr))
+
+#define CODE_STACK_PHYSICAL 0x0000000000006C00ull // The base of the stack running the C code we enter with
+#define CODE_STACK_END 0x0000000000007C00ull
+#define CORE_STACK_PHYSICAL 0x0000000000014000ull // The first CPU core's stack
+#define CORE_STACK_END 0x0000000000015000ull
+#define STACK_TOP 0xFFFFFFFFFFFFF000ull // The start of the highest stack
+#define MEM_CEILING 0xFFFFFFFFFFFFFFFFull // The highest address in the virtual memory map
 
 #define USER_REGION 0x00007FFFFFFFFFFFull // Not needed yet, but we're higher half so we might as well be thorough
 
diff --git a/chroma/system/memory/paging.c b/chroma/system/memory/paging.c
index 9215ba8..96eb6bf 100644
--- a/chroma/system/memory/paging.c
+++ b/chroma/system/memory/paging.c
@@ -112,6 +112,26 @@ const char* IntToAscii(int In) {
 
 }
 
+static void GetPageFromTables(address_space_t* AddressSpace, size_t VirtualAddress, size_t** Page) {
+
+    //ASSERT(Page != NULL);
+    //ASSERT(AddressSpace != NULL);
+
+    size_t* Pagetable = AddressSpace->PML4;
+    for(int Level = 4; Level > 1; Level--) {
+        size_t* Entry = &Pagetable[(VirtualAddress >> (12u + 9u * (Level - 1))) & 0x1FFU];
+
+        ASSERT(*Entry & PAGE_PRESENT, "Page not present during retrieval");
+        SerialPrintf("[ mem] Retrieval of level %d:%d of 0x%p is 0x%p\r\n", Level, (12u + 9u * (Level - 1)), VirtualAddress, (size_t) Entry);
+
+        Pagetable = (size_t*)((char*)(*Entry & 0x7ffffffffffff000ull) + DIRECT_REGION);
+    }
+
+    ASSERT(Pagetable[(VirtualAddress >> 12U) & 0x1FFU] & PAGE_PRESENT, "PDPE not present during retrieval");
+    *Page = &Pagetable[(VirtualAddress >> 12U) & 0x1FFU];
+
+}
+
 void InitPaging() {
 
     StackFreeList = (list_entry_t) { &StackFreeList, &StackFreeList };
@@ -127,8 +147,12 @@ void InitPaging() {
         .PML4 = PhysAllocateZeroMem(PAGE_SIZE)
     };
 
-    size_t* Pagetable = KernelAddressSpace.PML4;
+    //address_space_t InitialPaging = (address_space_t) {
+    //    .Lock = {0},
+    //    .PML4 = (size_t*) ReadControlRegister(3)
+    //};
 
+    size_t* Pagetable = KernelAddressSpace.PML4;
     //SerialPrintf("[ Mem] About to identity map the higher half.\n");
     // Identity map the higher half
     for(int i = 256; i < 512; i++) {
@@ -152,11 +176,11 @@
 
     SerialPrintf("[ Mem] Mapping kernel into new memory map.\r\n");
     //TODO: Disallow execution of rodata and data, and bootldr/environment
-    for(void* Address = CAST(void*, KERNEL_REGION);
-                Address < CAST(void*, KERNEL_REGION + (KernelEnd - KernelAddr));
+    for(void* Address = CAST(void*, KERNEL_PHYSICAL + KERNEL_TEXT);
+                Address < CAST(void*, KERNEL_END);
                 Address = CAST(void*, CAST(char*, Address) + PAGE_SIZE)) {
-        SerialPrintf("[ mem] Mapping 0x%p to 0x%p, relative to kernel at 0x%p\r\n", (CAST(size_t, Address) - KERNEL_REGION) + KernelLocation, Address, (CAST(size_t, Address) - KERNEL_REGION));
-        MapVirtualMemory(&KernelAddressSpace, Address, (CAST(size_t, Address) - KERNEL_REGION) + KernelLocation, MAP_EXEC);
+        SerialPrintf("[ mem] Mapping 0x%p to 0x%p, relative to kernel at 0x%p\r\n", CAST(size_t, Address), CAST(size_t, (CAST(size_t, Address) - KERNEL_PHYSICAL) + KERNEL_REGION), CAST(size_t, Address) - KERNEL_PHYSICAL);
+        MapVirtualMemory(&KernelAddressSpace, CAST(void*, (CAST(size_t, Address) - KERNEL_PHYSICAL) + KERNEL_REGION), CAST(size_t, Address), MAP_EXEC);
     }
 
     /*for(void* Address = CAST(void*, KERNEL_REGION + 0x2000);
@@ -165,14 +189,38 @@
         MapVirtualMemory(&KernelAddressSpace, Address, (CAST(size_t, Address) - KERNEL_REGION) + KERNEL_PHYSICAL_2, MAP_EXEC);
     }*/
     SerialPrintf("[ mem] Framebuffer at 0x%p, is 0x%p long. Mapping to 0x%p.\r\n", bootldr.fb_ptr, bootldr.fb_size, FB_REGION);
-    for(void* Address = CAST(void*, FB_REGION);
-                Address < CAST(void*, bootldr.fb_size + FB_REGION);
+    for(void* Address = CAST(void*, FB_PHYSICAL);
+                Address < CAST(void*, bootldr.fb_size + FB_PHYSICAL);
                 Address = CAST(void*, CAST(char*, Address) + PAGE_SIZE)) {
-        MapVirtualMemory(&KernelAddressSpace, Address, (CAST(size_t, Address) - FB_REGION) + FB_PHYSICAL, MAP_WRITE);
+        MapVirtualMemory(&KernelAddressSpace, CAST(void*, (CAST(size_t, Address) - FB_PHYSICAL) + FB_REGION), CAST(size_t, Address), MAP_WRITE);
     }
 
-    SerialPrintf("[ Mem] Kernel mapped into pagetables. New PML4 at 0x%p\r\n", KernelAddressSpace.PML4);
+    SerialPrintf("[ mem] Stack at 0x%p, mapping to UNKNOWN, top reaching UNKNOWN\r\n", CODE_STACK_PHYSICAL);
+    SerialPrintf("[ mem] Core 1 stack at 0x%p, mapping to 0x%p : 0x%p\r\n", CORE_STACK_PHYSICAL, STACK_TOP, MEM_CEILING);
+    for(void* Address = CAST(void*, CORE_STACK_PHYSICAL);
+                Address < CAST(void*, CORE_STACK_END);
+                Address = CAST(void*, CAST(char*, Address) + PAGE_SIZE)) {
+        MapVirtualMemory(&KernelAddressSpace, CAST(void*, STACK_TOP + (CAST(size_t, Address) - CORE_STACK_PHYSICAL)), CAST(size_t, Address), MAP_WRITE);
+    }
+
+    SerialPrintf("[ Mem] Kernel mapped into pagetables. New PML4 at 0x%p / 0x%p\r\n", (size_t) KernelAddressSpace.PML4, (size_t) Pagetable);
     SerialPrintf("[ Mem] About to move into our own pagetables.\r\n");
+
+    /*size_t pml4e = (0xffffffffffe021ba >> 39) & 0b111111111;
+    size_t pdpte = (0xffffffffffe021ba >> 30) & 0b111111111;
+    size_t pde = (0xffffffffffe021ba >> 21) & 0b111111111;
+    size_t pte = (0xffffffffffe021ba >> 12) & 0b111111111;
+    size_t offset = (0xffffffffffe021ba & 0b111111111111);*/
+    size_t* selfQuery;
+    GetPageFromTables(&KernelAddressSpace, 0xffffffffffe021ba, &selfQuery);
+    // This ^ returns the start of the page where the address is located, which includes flags.
+    // So we mask them off and add the offset later to retrieve the physical address V
+    size_t selfQueryRes = *((volatile size_t*)(selfQuery)) & 0x7ffffffffffff000ull;
+    size_t* initialQueryRes = 0; // TODO: Unstable!
+    //GetPageFromTables(&InitialPaging, 0xffffffffffe021ba, &initialQueryRes);
+
+    size_t targetAddr = 0xffffffffffe021ba;
+    SerialPrintf("[ Mem] Sanity check: Virtual Addr 0x%p maps to physical addr 0x%po vs 0x%pb\r\n", targetAddr, (size_t) selfQueryRes + (targetAddr & 0xFFF), (size_t) initialQueryRes);
     WriteControlRegister(3, (size_t) KernelAddressSpace.PML4);
     SerialPrintf("[ Mem] We survived!\r\n");
     //ASSERT(Allocator != NULL);
@@ -195,24 +243,6 @@ static bool ExpandAllocator(size_t NewSize) {
     return AddPoolToAllocator(Allocator, Pool, AllocSize) != NULL;
 }
 
-static void GetPageFromTables(address_space_t* AddressSpace, size_t VirtualAddress, size_t** Page) {
-
-    //ASSERT(Page != NULL);
-    //ASSERT(AddressSpace != NULL);
-
-    size_t* Pagetable = AddressSpace->PML4;
-    for(int Level = 4; Level > 1; Level--) {
-        size_t* Entry = &Pagetable[(VirtualAddress >> (12u + 9u * (Level - 1))) & 0x1FFU];
-
-        ASSERT(*Entry & PAGE_PRESENT, "Page not present during retrieval");
-
-        Pagetable = (size_t*)((char*)(*Entry & 0x7ffffffffffff000ull) + DIRECT_REGION);
-    }
-
-    ASSERT(Pagetable[(VirtualAddress >> 12U) & 0x1FFU] & PAGE_PRESENT, "PDPE not present during retrieval");
-    *Page = &Pagetable[(VirtualAddress >> 12U) & 0x1FFU];
-
-}
 
 void SetAddressSpace(address_space_t* AddressSpace) {
     //ASSERT(AddressSpace != NULL);
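
Note on the sanity check at the end of InitPaging(): GetPageFromTables() hands back a pointer to the last-level page-table entry, so the physical address is recovered by masking the flag bits out of that entry (the low 12 bits plus the NX bit, i.e. keeping 0x7ffffffffffff000) and adding the page offset, which is the low 12 bits (0xFFF) of the virtual address. The sketch below is a minimal, standalone illustration of that decomposition for the same virtual address, 0xffffffffffe021ba; it runs in user space only, and the page-table entry value in it is made up for demonstration rather than read from the kernel's real tables.

#include <stdint.h>
#include <stdio.h>

int main(void) {
    /* The virtual address queried by the sanity check in InitPaging(). */
    uint64_t virt = 0xffffffffffe021baULL;

    /* x86_64 4-level paging: each level consumes 9 bits of the address,
     * and the low 12 bits are the byte offset inside the 4 KiB page. */
    uint64_t pml4_index = (virt >> 39) & 0x1FF;
    uint64_t pdpt_index = (virt >> 30) & 0x1FF;
    uint64_t pd_index   = (virt >> 21) & 0x1FF;
    uint64_t pt_index   = (virt >> 12) & 0x1FF;
    uint64_t offset     =  virt        & 0xFFF;

    /* Made-up page-table entry for illustration: frame at 0x200000 with the
     * PRESENT and WRITE bits set. A real entry would come from the tables. */
    uint64_t entry = 0x0000000000200003ULL;

    /* Mask off the flag bits (low 12) and the NX bit (bit 63) to get the frame
     * base, then add the page offset back in - the same arithmetic the
     * sanity-check SerialPrintf applies to selfQueryRes. */
    uint64_t frame    = entry & 0x7ffffffffffff000ULL;
    uint64_t physical = frame + offset;

    printf("PML4 %3llu | PDPT %3llu | PD %3llu | PT %3llu | offset 0x%03llx -> phys 0x%llx\n",
           (unsigned long long) pml4_index, (unsigned long long) pdpt_index,
           (unsigned long long) pd_index,   (unsigned long long) pt_index,
           (unsigned long long) offset,     (unsigned long long) physical);
    return 0;
}

For this address the walk ends at PML4/PDPT/PD slot 511 and PT slot 2 with offset 0x1BA, i.e. the page at KERNEL_REGION + 0x2000, which lines up with the KERNEL_TEXT offset that the kernel-mapping loop starts from.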