Rework memory management.
parent 53506eccb8
commit baf09c80f2

@@ -1,12 +1,18 @@
#include <stddef.h>
#include <stdint.h>
#include <kernel/system/interrupts.h>
#include <lainlib/lainlib.h>

/************************
*** Team Kitty, 2020 ***
***      Chroma       ***
***********************/

/************************************************
 * C O N S T A N T S A N D M A C R O S
 *************************************************/

#define PAGE_SIZE 4096
#define PAGES_PER_BUCKET 8

@@ -16,6 +22,20 @@

#define READ_BIT(i) ((OFFSET_BIT(i) >> (i % PAGES_PER_BUCKET)) & 0x1)
#define GET_BUCKET32(i) (*((uint32_t*) (&Memory[i / 32])))

#define CAST(a, b) ((a) (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

#define REINTERPRET_CAST(target, intermediate, value) ((target*)((intermediate*)value))

#define CONCAT(x, y) x ## y
#define CONCAT2(x, y) CONCAT(x, y)
#define ASSERT(exp, error) \
    if(!(exp)) SomethingWentWrong(error);
// typedef char CONCAT2(static_assert, __LINE__) [(exp) ? 1 : -1]

#define CLZ(num) (num ? __builtin_clzll(num) : 64)

#define IS_ALIGNED(addr) (((size_t) addr | 0xFFFFFFFFFFFFF000) == 0)
#define PAGE_ALIGN(addr) ((((size_t) addr) & 0xFFFFFFFFFFFFF000) + 0x1000)

@@ -33,20 +53,206 @@
#define ERR_INST 0x10

/*
 * The way we boot, using BOOTBOOT, and the static hard drive images, means
 * we're limited to Protocol 1 - we cannot ask the bootloader to move anything
 * around for us.
 *
 * That means we need to account for these unmovable sections in the paging system.
 *
 * MMIO_REGION
 *  Represents the MMIO symbol defined in the linkerscript and chroma.h.
 * FB_REGION
 *  Represents the framebuffer used throughout the kernel.
 *  This is likely the most important thing to keep where it is. Without this, we
 *  have no video output.
 * KERNEL_REGION
 *  This is where the kernel itself is loaded into memory. Protocol 1 means
 *  we're loaded into the -2MB area.
 *  We *CAN* move the kernel about in memory. It's as simple as memcpying it around
 *  and calling a void pointer as a function to return to where we were.
 *  We *CANNOT* move the framebuffer in this manner, as it is set directly by BIOS,
 *  and the graphics device most likely will not allow this to happen.
 *  For this reason, the kernel, framebuffer and MMIO will remain where they are.
 *  Luckily, there are more components of Chroma than the kernel itself. That's what
 *  the kernel heap and kernel stack areas are for.
 *
 * USER_REGION
 *  This is the dedicated space 0...7FFFFFFFFFFF for userspace.
 *  No kernel objects or data will be put into this space.
 *  Protocol 1 puts the page tables at 0xA000 by default, so these will have to be moved
 *  up to kernel space.
 *
 * KERNEL_STACK_REGION
 * KERNEL_STACK_END
 *  Encapsulate a 1GB large area of memory, to be used by the kernel for thread & interrupt stacks,
 *  call unwinding and other debug information.
 *
 * KERNEL_HEAP_REGION
 * KERNEL_HEAP_END
 *  Encapsulate another 1GB large area for kernel objects. ie. resources (images, sounds), libraries,
 *  data structures, assorted information about the system.. etc.
 *
 * DIRECT_REGION
 *  As mentioned above, the lower half is reserved for user space.
 *  The higher half will be direct-mapped throughout.
 *  This is the cutoff for the higher half - FFFF800000000000.
 */

#define MMIO_REGION         0xFFFFFFFFF8000000ull // Cannot move!
#define FB_REGION           0xFFFFFFFFFC000000ull // Cannot move!
#define FB_PHYSICAL         0x00000000E0000000ull // Physical location of the Framebuffer
#define KERNEL_REGION       0xFFFFFFFFFFE00000ull // -2MiB, from bootloader
#define KERNEL_PHYSICAL     0x0000000000008000ull // Physical location of the kernel
#define KERNEL_PHYSICAL_2   0x000000000011C000ull // For some reason the kernel is split in half

#define USER_REGION         0x00007FFFFFFFFFFFull // Not needed yet, but we're higher half so we might as well be thorough

#define KERNEL_STACK_REGION 0xFFFFE00000000000ull // Kernel Stack Space
#define KERNEL_STACK_END    0xFFFFE00040000000ull // End of Kernel Stack Space

#define KERNEL_HEAP_REGION  0xFFFFE00080000000ull // Kernel Object Space (kmalloc will allocate into this region)
#define KERNEL_HEAP_END     0xFFFFE000C0000000ull // End of Kernel Object Space

#define DIRECT_REGION       0xFFFF800000000000ull

#define LOWER_REGION        0x0000000100000000ull // Lower Memory cutoff - 4GB

#define PAGE_SHIFT 12
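As a brief aside, here is a minimal sketch (not part of this commit) of how the DIRECT_REGION cutoff above might be used to translate between a physical address and its direct-mapped alias; the helper names are invented for illustration:

    // Illustrative only: translate between a physical address and its direct-mapped virtual alias.
    static inline void* PhysToDirect(size_t Physical) {
        return (void*) (Physical + DIRECT_REGION);
    }

    static inline size_t DirectToPhys(void* Virtual) {
        return (size_t) Virtual - DIRECT_REGION;
    }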
/*********************************************
 * T Y P E D E F I N I T I O N S
 **********************************************/

typedef void* directptr_t;

typedef struct {
    ticketlock_t Lock;
    directptr_t PML4;
} address_space_t;

typedef enum {
    MAP_WRITE = 0x1,
    MAP_EXEC = 0x2,
} mapflags_t;

typedef enum {
    CACHE_NONE,
    CACHE_WRITE_THROUGH,
    CACHE_WRITE_BACK,
    CACHE_WRITE_COMBINING
} pagecache_t;

typedef struct {
    int MaxOrder;
    directptr_t Base;
    directptr_t* List;
    ticketlock_t Lock;
} buddy_t;

/*********************************************
 * A b s t r a c t A l l o c a t o r
 **********************************************/

const char* IntToAscii(int In);

typedef void* allocator_t;
typedef void* mempool_t;

allocator_t CreateAllocator(void* Memory);
allocator_t CreateAllocatorWithPool(void* Memory, size_t Bytes);

void DestroyAllocator(allocator_t Allocator);

mempool_t GetPoolFromAllocator(allocator_t Allocator);

mempool_t AddPoolToAllocator(allocator_t Allocator, void* Memory, size_t Bytes);
void RemovePoolFromAllocator(allocator_t Allocator, mempool_t pool);

void* AllocatorMalloc (allocator_t Allocator, size_t Bytes);
void* AllocatorMalign (allocator_t Allocator, size_t Alignment, size_t Bytes);
void* AllocatorRealloc(allocator_t Allocator, void* VirtualAddress, size_t NewSize);
void  AllocatorFree   (allocator_t Allocator, void* VirtualAddress);

size_t AllocatorGetBlockSize(void* VirtualAddress);

size_t AllocatorSize(void);
size_t AllocatorAlignSize(void);
size_t AllocatorMinBlockSize(void);
size_t AllocatorMaxBlockSize(void);

size_t AllocatorPoolOverhead(void);
size_t AllocatorAllocateOverhead(void);

size_t AlignUpwards(size_t Pointer, size_t Alignment);
size_t AlignDownwards(size_t Pointer, size_t Alignment);
void* AlignPointer(const void* Pointer, size_t Alignment);
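A minimal usage sketch of the abstract allocator interface above; the pool buffer, names and sizes are invented for illustration and are not part of this commit:

    // Illustrative only: create an allocator inside a static buffer and allocate from it.
    static uint8_t Pool[64 * 1024];

    void ExampleAllocatorUse(void) {
        allocator_t Alloc = CreateAllocatorWithPool(Pool, sizeof(Pool));

        void* Buffer = AllocatorMalloc(Alloc, 256);     // take a 256-byte block from the pool
        Buffer = AllocatorRealloc(Alloc, Buffer, 512);  // grow it, in place if possible
        AllocatorFree(Alloc, Buffer);                   // return it to the pool

        DestroyAllocator(Alloc);
    }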
/************************************************************
 * C h r o m a M e m o r y M a n a g e m e n t
 *************************************************************/

extern size_t end;

void ListMemoryMap();

void InitMemoryManager();

void AddRangeToPhysMem(directptr_t Base, size_t Size);

directptr_t PhysAllocateLowMem(size_t Size);
directptr_t PhysAllocateMem(size_t Size);
directptr_t PhysAllocateZeroMem(size_t Size);
directptr_t PhysAllocateLowZeroMem(size_t Size);
directptr_t PhysAllocatePage();

void PhysRefPage(directptr_t Page);
void PhysFreePage(directptr_t Page);

void FreePhysMem(directptr_t Phys);

size_t SeekFrame();

void MemoryTest();

void InitPaging();

void TraversePageTables();

void* memcpy(void* dest, void const* src, size_t len);

/*********************************************
 * C h r o m a A l l o c a t o r
 **********************************************/

void SetAddressSpace(address_space_t* Space);
//TODO: Copy to/from Userspace

void MapVirtualMemory(address_space_t* Space, void* VirtualAddress, size_t PhysicalAddress, mapflags_t Flags);
void UnmapVirtualMemory(address_space_t* Space, void* VirtualAddress);

void CacheVirtualMemory(address_space_t* Space, void* VirtualAddress, pagecache_t CacheType);

void* AllocateMemory(size_t Bits);

void* ReallocateMemory(void* VirtualAddress, size_t NewSize);

void FreeMemory(void* VirtualAddress);

void* AllocateKernelStack();

void FreeKernelStack(void* StackAddress);

void PageFaultHandler(INTERRUPT_FRAME Frame);
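Below is a hedged sketch of how the virtual-memory interface declared above might be driven; the chosen address, the helper function and the exact sequence are invented for illustration and are not part of this commit:

    // Illustrative only: map one writable page into an address space, then change its caching.
    void ExampleMapping(address_space_t* Space) {
        void*  Virtual  = (void*) KERNEL_HEAP_REGION;   // pick a kernel-heap page
        size_t Physical = (size_t) PhysAllocatePage();  // back it with a fresh frame

        MapVirtualMemory(Space, Virtual, Physical, MAP_WRITE);
        CacheVirtualMemory(Space, Virtual, CACHE_WRITE_BACK);

        UnmapVirtualMemory(Space, Virtual);
        PhysFreePage((directptr_t) Physical);
    }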
chroma/system/memory/abstract_allocator.c (new file, 798 lines)

@@ -0,0 +1,798 @@
#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>
#include <kernel/system/memory.h>
#include <kernel/system/io.h>

/************************
*** Team Kitty, 2020 ***
***      Chroma       ***
***********************/

/************************************************
 * C O N S T A N T S A N D M A C R O S
 *************************************************/

#define BLOCK_FREE (1 << 0)
#define BLOCK_PREV_FREE (1 << 1)

#define BLOCK_OVERHEAD (sizeof(size_t))

#define BLOCK_OFFSET (offsetof(block_header_t, Size) + sizeof(size_t))

#define BLOCK_MIN_SIZE (sizeof(block_header_t) - sizeof(block_header_t*))
#define BLOCK_MAX_SIZE (CAST(size_t, 1) << FL_LIMIT)

#define static_assert _Static_assert

extern void SomethingWentWrong(const char* Message);

//#define ASSERT(X) _Static_assert(X)

/************************************************
 * S A N I T Y C H E C K S
 *************************************************/

//_Static_Assert(sizeof(int) * __CHAR_BIT__ == 32);
//_Static_Assert(sizeof(int) * __CHAR_BIT__ == 32);
//_Static_Assert(sizeof(size_t) * __CHAR_BIT__ >= 32);
//_Static_Assert(sizeof(size_t) * __CHAR_BIT__ <= 64);
//_Static_Assert(sizeof(unsigned int) * __CHAR_BIT__ >= SL_INDEX_COUNT);
//_Static_Assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT);

/************************************************
 * F F S A N D F L S
 *************************************************/

#ifdef _cplusplus
#define alloc_decl inline
#else
#define alloc_decl static
#endif

alloc_decl int Alloc_FindFirstOne(unsigned int word) {
    return __builtin_ffs(word) - 1;
}

alloc_decl int Alloc_FindLastOne(unsigned int word) {
    const int bit = word ? 32 - __builtin_clz(word) : 0;
    return bit - 1;
}

alloc_decl int Alloc_FindLastOne_64(size_t size) {

    int high = (int)(size >> 32);
    int bits = 0;

    if(high)
        bits = 32 + Alloc_FindLastOne(high);
    else
        bits = Alloc_FindLastOne((int)size & 0xFFFFFFFF);

    return bits;
}

#undef alloc_decl

/*********************************************
 * T Y P E D E F I N I T I O N S
 **********************************************/

enum Alloc_Public {
    SL_LIMIT_LN = 5,
};

enum Alloc_Private {
    ALIGN_SIZE_LN = 3,
    ALIGN_SIZE = (1 << ALIGN_SIZE_LN),

    FL_LIMIT = 32,

    SL_INDEX_COUNT = (1 << SL_LIMIT_LN),

    FL_INDEX_SHIFT = (SL_LIMIT_LN + ALIGN_SIZE_LN),
    FL_INDEX_COUNT = (FL_LIMIT - FL_INDEX_SHIFT + 1),

    SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT),
};

typedef struct block_header_t {
    struct block_header_t* LastBlock;

    size_t Size; // Not including this header

    struct block_header_t* NextFreeBlock;
    struct block_header_t* LastFreeBlock;
} block_header_t;

typedef struct allocator_control_t {

    block_header_t BlockNull;

    unsigned int FirstLevel_Bitmap;
    unsigned int SecondLevel_Bitmap[FL_INDEX_COUNT];

    block_header_t* Blocks[FL_INDEX_COUNT][SL_INDEX_COUNT];
} allocator_control_t;
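As an illustrative aside (not part of the original file), with 8-byte pointers and size_t, and assuming no structure padding, the constants above work out roughly as follows:

    sizeof(block_header_t)  = 32        (four 8-byte fields)
    BLOCK_OVERHEAD          = 8         (only the Size word precedes a used block's payload)
    BLOCK_OFFSET            = 16        (offsetof(Size) + sizeof(size_t) = 8 + 8)
    BLOCK_MIN_SIZE          = 24        (32 - sizeof(block_header_t*))
    BLOCK_MAX_SIZE          = 1 << 32   (FL_LIMIT = 32)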
/**********************************************************************************
 * B L O C K _ H E A D E R _ T M E M B E R F U N C T I O N S
 ************************************************************************************/

static size_t BlockSize(const block_header_t* Block) {
    return Block->Size & ~(BLOCK_FREE | BLOCK_PREV_FREE);
}

static void BlockSetSize(block_header_t* Block, size_t Size) {
    Block->Size = Size | (Block->Size & (BLOCK_FREE | BLOCK_PREV_FREE));
}

static int BlockIsLast(const block_header_t* Block) {
    return BlockSize(Block) == 0;
}

static int BlockIsFree(const block_header_t* Block) {
    return CAST(int, Block->Size & BLOCK_FREE);
}

static void BlockSetFree(block_header_t* Block) {
    Block->Size |= BLOCK_FREE;
}

static void BlockSetUsed(block_header_t* Block) {
    Block->Size &= ~BLOCK_FREE;
}

static int BlockPrevIsFree(const block_header_t* Block) {
    return CAST(int, Block->Size & BLOCK_PREV_FREE);
}

static void BlockSetPrevFree(block_header_t* Block) {
    Block->Size |= BLOCK_PREV_FREE;
}

static void BlockSetPrevUsed(block_header_t* Block) {
    Block->Size &= ~BLOCK_PREV_FREE;
}

static block_header_t* WhichBlock(const void* Address) {
    return CAST(block_header_t*, CAST(unsigned char*, Address) - BLOCK_OFFSET);
}

static void* WhereBlock(const block_header_t* Block) {
    return CAST(void*, CAST(unsigned char*, Block) + BLOCK_OFFSET);
}

static block_header_t* OffsetToBlock(const void* Address, size_t Size) {
    return CAST(block_header_t*, CAST(ptrdiff_t, Address) + Size);
}

static block_header_t* BlockGetPrevious(const block_header_t* Current) {
    ASSERT(BlockPrevIsFree(Current), "BlockGetPrevious: Previous block NOT free");
    return Current->LastBlock;
}

static block_header_t* BlockGetNext(const block_header_t* Current) {
    block_header_t* NextBlock = OffsetToBlock(WhereBlock(Current), BlockSize(Current) - BLOCK_OVERHEAD);
    ASSERT(!BlockIsLast(Current), "BlockGetNext: Current block is last!");
    return NextBlock;
}

static block_header_t* BlockLinkToNext(block_header_t* Current) {
    block_header_t* NextBlock = BlockGetNext(Current);
    NextBlock->LastBlock = Current;
    return NextBlock;
}

static void BlockMarkFree(block_header_t* Current) {
    block_header_t* NextBlock = BlockLinkToNext(Current);
    BlockSetPrevFree(NextBlock);
    BlockSetFree(Current);
}

static void BlockMarkUsed(block_header_t* Current) {
    block_header_t* NextBlock = BlockGetNext(Current);
    BlockSetPrevUsed(NextBlock);
    BlockSetUsed(Current);
}

/***********************************************************************************
 * P O I N T E R A L I G N M E N T F U N C T I O N S
 ************************************************************************************/

size_t AlignUpwards(size_t Pointer, size_t Alignment) {
    //ASSERT(((Alignment & (Alignment - 1)) == 0));
    return (Pointer + (Alignment - 1)) & ~(Alignment - 1);
}

size_t AlignDownwards(size_t Pointer, size_t Alignment) {
    //ASSERT((Alignment & (Alignment - 1) == 0));
    return (Pointer - (Pointer & (Alignment - 1)));
}

void* AlignPointer(const void* Pointer, size_t Alignment) {

    const ptrdiff_t AlignedPointer =
        ((
            CAST(ptrdiff_t, Pointer)
            + (Alignment - 1))
            & ~(Alignment - 1)
        );
    ASSERT(((Alignment & (Alignment - 1)) == 0), "AlignPointer: Requested alignment not aligned!");

    return CAST(void*, AlignedPointer);
}

/***********************************************************************************
 * M E M O R Y B L O C K M A N A G E M E N T
 ************************************************************************************/

static size_t AlignRequestSize(size_t Size, size_t Alignment) {
    size_t Adjustment = 0;

    if(Size) {
        const size_t Aligned = AlignUpwards(Size, Alignment);

        if(Aligned < BLOCK_MAX_SIZE)
            Adjustment = MAX(Aligned, BLOCK_MIN_SIZE);
    }

    return Adjustment;
}

static void InsertMapping(size_t Size, int* FirstLevelIndex, int* SecondLevelIndex) {
    int FirstLevel, SecondLevel;

    if(Size < SMALL_BLOCK_SIZE) {
        FirstLevel = 0;
        SecondLevel = CAST(int, Size) / (SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
    } else {
        FirstLevel = Alloc_FindLastOne_64(Size);
        SecondLevel = CAST(int, Size >> (FirstLevel - SL_LIMIT_LN)) ^ (1 << SL_LIMIT_LN);

        FirstLevel -= (FL_INDEX_SHIFT - 1);
    }

    *FirstLevelIndex = FirstLevel;
    *SecondLevelIndex = SecondLevel;
}
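A worked example of the two-level mapping above (not part of the original file), using the constants defined earlier (SL_LIMIT_LN = 5, FL_INDEX_SHIFT = 8, SMALL_BLOCK_SIZE = 256):

    Size = 100  (below SMALL_BLOCK_SIZE)
        FirstLevel  = 0
        SecondLevel = 100 / (256 / 32) = 12

    Size = 4096 (at or above SMALL_BLOCK_SIZE)
        FirstLevel  = fls64(4096) = 12
        SecondLevel = (4096 >> (12 - 5)) ^ (1 << 5) = 32 ^ 32 = 0
        FirstLevel -= (8 - 1), giving FirstLevel = 5

    So a 4 KiB request lands in free list Blocks[5][0].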
static void RoundUpBlockSize(size_t Size, int* FirstLevelIndex, int* SecondLevelIndex) {
    if(Size >= SMALL_BLOCK_SIZE) {
        const size_t Rounded = (1 << (Alloc_FindLastOne_64(Size) - SL_LIMIT_LN)) - 1;
        Size += Rounded;
    }

    InsertMapping(Size, FirstLevelIndex, SecondLevelIndex);
}

static block_header_t* FindSuitableBlock(allocator_control_t* Controller, int* FirstLevelIndex, int* SecondLevelIndex) {
    int FirstLevel = *FirstLevelIndex;
    int SecondLevel = *SecondLevelIndex;

    unsigned int SLMap = Controller->SecondLevel_Bitmap[FirstLevel] & (~0U << SecondLevel);

    if(!SLMap) {

        const unsigned int FLMap = Controller->FirstLevel_Bitmap & (~0U << (FirstLevel + 1));

        if(!FLMap)
            return 0;

        FirstLevel = Alloc_FindFirstOne(FLMap);
        *FirstLevelIndex = FirstLevel;
        SLMap = Controller->SecondLevel_Bitmap[FirstLevel];
    }

    ASSERT(SLMap, "FindSuitableBlock: Second level bitmap not present!");

    SecondLevel = Alloc_FindFirstOne(SLMap);
    *SecondLevelIndex = SecondLevel;

    return Controller->Blocks[FirstLevel][SecondLevel];
}

static void RemoveFreeBlock(allocator_control_t* Controller, block_header_t* Block, int FirstLevel, int SecondLevel) {
    block_header_t* PreviousBlock = Block->LastFreeBlock;
    block_header_t* NextBlock = Block->NextFreeBlock;

    ASSERT(PreviousBlock, "RemoveFreeBlock: PreviousBlock is null!");
    ASSERT(NextBlock, "RemoveFreeBlock: NextBlock is null!");

    NextBlock->LastFreeBlock = PreviousBlock;
    PreviousBlock->NextFreeBlock = NextBlock;

    if(Controller->Blocks[FirstLevel][SecondLevel] == Block) {
        Controller->Blocks[FirstLevel][SecondLevel] = NextBlock;

        if(NextBlock == &Controller->BlockNull) {
            Controller->SecondLevel_Bitmap[FirstLevel] &= ~(1U << SecondLevel);

            if(!Controller->SecondLevel_Bitmap[FirstLevel]) {
                Controller->FirstLevel_Bitmap &= ~(1U << FirstLevel);
            }
        }
    }
}

static void InsertFreeBlock(allocator_control_t* Controller, block_header_t* NewBlock, int FirstLevel, int SecondLevel) {
    block_header_t* Current = Controller->Blocks[FirstLevel][SecondLevel];

    ASSERT(Current, "InsertFreeBlock: Current Block is null!");
    if(!Current) {
        SerialPrintf("Extra info: \r\n\tFirst Level: %x Second Level: %x\r\nFirst Level bitmap: %x, Second Level bitmap: %x\r\n\tBlocks %x, BlocksAddress: %x", FirstLevel, SecondLevel, Controller->FirstLevel_Bitmap, Controller->SecondLevel_Bitmap, Controller->Blocks, Controller->Blocks[FirstLevel][SecondLevel]);
        for(;;){}
    }
    ASSERT(NewBlock, "InsertFreeBlock: New Block is null!");

    NewBlock->NextFreeBlock = Current;
    NewBlock->LastFreeBlock = &Controller->BlockNull;

    Current->LastFreeBlock = NewBlock;

    ASSERT(WhereBlock(NewBlock) == AlignPointer(WhereBlock(NewBlock), ALIGN_SIZE), "InsertFreeBlock: Current block is not memory aligned!");

    Controller->Blocks[FirstLevel][SecondLevel] = NewBlock;
    Controller->FirstLevel_Bitmap |= (1U << FirstLevel);
    Controller->SecondLevel_Bitmap[FirstLevel] |= (1U << SecondLevel);
}

static void RemoveBlock(allocator_control_t* Controller, block_header_t* Block) {
    int FirstLevel, SecondLevel;

    InsertMapping(BlockSize(Block), &FirstLevel, &SecondLevel);
    RemoveFreeBlock(Controller, Block, FirstLevel, SecondLevel);
}

static void InsertBlock(allocator_control_t* Controller, block_header_t* Block) {
    int FirstLevel, SecondLevel;
    InsertMapping(BlockSize(Block), &FirstLevel, &SecondLevel);
    InsertFreeBlock(Controller, Block, FirstLevel, SecondLevel);
}

static int CanBlockSplit(block_header_t* Block, size_t NewSize) {
    return BlockSize(Block) >= sizeof(block_header_t) + NewSize;
}

static block_header_t* SplitBlock(block_header_t* Block, size_t NewSize) {
    block_header_t* Overlap = OffsetToBlock(WhereBlock(Block), NewSize - BLOCK_OVERHEAD);

    const size_t RemainingSize = BlockSize(Block) - (NewSize + BLOCK_OVERHEAD);

    ASSERT(WhereBlock(Overlap) == AlignPointer(WhereBlock(Overlap), ALIGN_SIZE), "SplitBlock: Requested size results in intermediary block which is not aligned!");

    ASSERT(BlockSize(Block) == RemainingSize + NewSize + BLOCK_OVERHEAD, "SplitBlock: Maths error!");

    BlockSetSize(Overlap, RemainingSize);

    ASSERT(BlockSize(Overlap) >= BLOCK_MIN_SIZE, "SplitBlock: Requested size results in new block that is too small!");

    BlockSetSize(Block, NewSize);

    BlockMarkFree(Overlap);

    return Overlap;
}

static block_header_t* MergeBlockDown(block_header_t* Previous, block_header_t* Block) {
    ASSERT(!BlockIsLast(Previous), "MergeBlockDown: Previous block is the last block! (Current block is first block?)");

    Previous->Size += BlockSize(Block) + BLOCK_OVERHEAD;
    BlockLinkToNext(Previous);
    return Previous;
}

static block_header_t* MergeEmptyBlockDown(allocator_control_t* Controller, block_header_t* Block) {

    if(BlockPrevIsFree(Block)) {
        block_header_t* Previous = BlockGetPrevious(Block);
        ASSERT(Previous, "MergeEmptyBlockDown: Previous block is null!");
        ASSERT(BlockIsFree(Previous), "MergeEmptyBlockDown: Previous block is free!");
        RemoveBlock(Controller, Previous);
        Block = MergeBlockDown(Previous, Block);
    }

    return Block;
}

static block_header_t* MergeNextBlockDown(allocator_control_t* Controller, block_header_t* Block) {
    block_header_t* NextBlock = BlockGetNext(Block);
    ASSERT(NextBlock, "MergeNextBlockDown: Next Block is null!");

    if(BlockIsFree(NextBlock)) {
        ASSERT(!BlockIsLast(Block), "MergeNextBlockDown: Current block is the last block!");
        RemoveBlock(Controller, NextBlock);
        Block = MergeBlockDown(Block, NextBlock);
    }

    return Block;
}

static void TrimBlockFree(allocator_control_t* Controller, block_header_t* Block, size_t Size) {
    ASSERT(BlockIsFree(Block), "TrimBlockFree: Current block is wholly free!");

    if(CanBlockSplit(Block, Size)) {
        block_header_t* RemainingBlock = SplitBlock(Block, Size);

        BlockLinkToNext(Block);

        BlockSetPrevFree(RemainingBlock);

        InsertBlock(Controller, RemainingBlock);
    }
}

static void TrimBlockUsed(allocator_control_t* Controller, block_header_t* Block, size_t Size) {
    ASSERT(!BlockIsFree(Block), "TrimBlockUsed: The current block is wholly used!");

    if(CanBlockSplit(Block, Size)) {

        block_header_t* RemainingBlock = SplitBlock(Block, Size);

        BlockSetPrevUsed(RemainingBlock);

        RemainingBlock = MergeNextBlockDown(Controller, RemainingBlock);

        InsertBlock(Controller, RemainingBlock);
    }
}

static block_header_t* TrimBlockLeadingFree(allocator_control_t* Controller, block_header_t* Block, size_t Size) {
    block_header_t* RemainingBlock = Block;

    if(CanBlockSplit(Block, Size)) {
        RemainingBlock = SplitBlock(Block, Size - BLOCK_OVERHEAD);

        BlockSetPrevFree(RemainingBlock);

        BlockLinkToNext(Block);
        InsertBlock(Controller, Block);
    }

    return RemainingBlock;
}

static block_header_t* LocateFreeBlock(allocator_control_t* Controller, size_t Size) {

    int FirstLevel = 0, SecondLevel = 0;

    block_header_t* Block = 0;

    if(Size) {

        RoundUpBlockSize(Size, &FirstLevel, &SecondLevel);

        if(FirstLevel < FL_INDEX_COUNT) {
            Block = FindSuitableBlock(Controller, &FirstLevel, &SecondLevel);
        }
    }

    if(Block) {
        ASSERT(BlockSize(Block) >= Size, "LocateFreeBlock: Found a block that is too small!");
        RemoveFreeBlock(Controller, Block, FirstLevel, SecondLevel);
    }

    return Block;
}

static void* PrepareUsedBlock(allocator_control_t* Controller, block_header_t* Block, size_t Size) {
    void* Pointer = 0;

    if(Block){
        ASSERT(Size, "PrepareUsedBlock: Size is 0!");
        TrimBlockFree(Controller, Block, Size);
        BlockMarkUsed(Block);
        Pointer = WhereBlock(Block);
    }

    return Pointer;
}

/***********************************************************************************
 * C O N T R O L L E R M A N A G E M E N T
 ************************************************************************************/

static void ConstructController(allocator_control_t* Controller) {
    int i, j;

    Controller->BlockNull.NextFreeBlock = &Controller->BlockNull;
    Controller->BlockNull.LastFreeBlock = &Controller->BlockNull;

    Controller->FirstLevel_Bitmap = 0;

    for ( i = 0; i < FL_INDEX_COUNT; i++) {
        Controller->SecondLevel_Bitmap[i] = 0;

        for (j = 0; j < SL_INDEX_COUNT; j++) {
            Controller->Blocks[i][j] = &Controller->BlockNull;
        }
    }
}

/***********************************************************************************
 * H E A D E R ( A P I ) F U N C T I O N S
 ************************************************************************************/

size_t AllocatorGetBlockSize(void* Memory) {
    size_t Size = 0;

    if(Memory) {
        const block_header_t* Block = WhichBlock(Memory);
        Size = BlockSize(Block);
    }

    return Size;
}

size_t AllocatorSize(void) {
    return sizeof(allocator_control_t);
}

size_t AllocatorAlignSize(void) {
    return ALIGN_SIZE;
}

size_t AllocatorMinBlockSize(void) {
    return BLOCK_MIN_SIZE;
}

size_t AllocatorMaxBlockSize(void) {
    return BLOCK_MAX_SIZE;
}

size_t AllocatorPoolOverhead(void) {
    return 2 * BLOCK_OVERHEAD; // Free block + Sentinel block
}

size_t AllocatorAllocateOverhead(void) {
    return BLOCK_OVERHEAD;
}

mempool_t AddPoolToAllocator(allocator_t Allocator, void* Address, size_t Size) {

    block_header_t* Block;
    block_header_t* NextBlock;

    const size_t PoolOverhead = AllocatorPoolOverhead();
    const size_t PoolBytes = AlignDownwards(Size - PoolOverhead, ALIGN_SIZE);

    if(((ptrdiff_t) Address % ALIGN_SIZE) != 0) {
        SerialPrintf("Memory manager error at [%s:%x]: Memory not properly aligned.\r\n", __FILE__, __LINE__);
        return 0;
    }

    if( PoolBytes < BLOCK_MIN_SIZE || PoolBytes > BLOCK_MAX_SIZE) {
        SerialPrintf("Memory manager error at [%s:%x]: Memory Size out of bounds: 0x%x-0x%x: 0x%x.\r\n", __FILE__, __LINE__, (unsigned int)(PoolOverhead + BLOCK_MIN_SIZE), (unsigned int)(PoolOverhead + BLOCK_MAX_SIZE) / 256, PoolBytes);
        return 0;
    }

    Block = OffsetToBlock(Address, -(ptrdiff_t)BLOCK_OVERHEAD);
    BlockSetSize(Block, PoolBytes);
    BlockSetFree(Block);
    BlockSetPrevUsed(Block);

    InsertBlock(CAST(allocator_control_t*, Allocator), Block);

    NextBlock = BlockLinkToNext(Block);
    BlockSetSize(NextBlock, 0);
    BlockSetUsed(NextBlock);
    BlockSetPrevFree(NextBlock);

    return Address;
}

void RemovePoolFromAllocator(allocator_t Allocator, mempool_t Pool){
    allocator_control_t* Controller = CAST(allocator_control_t*, Allocator);
    block_header_t* Block = OffsetToBlock(Pool, -(int)BLOCK_OVERHEAD);

    int FirstLevel = 0, SecondLevel = 0;

    ASSERT(BlockIsFree(Block), "RemovePoolFromAllocator: Current block is free!");
    ASSERT(!BlockIsFree(BlockGetNext(Block)), "RemovePoolFromAllocator: Next Block is not free!");
    ASSERT(BlockSize(BlockGetNext(Block)) == 0, "RemovePoolFromAllocator: Next block is size 0!");

    RoundUpBlockSize(BlockSize(Block), &FirstLevel, &SecondLevel);
    RemoveFreeBlock(Controller, Block, FirstLevel, SecondLevel);
}

int TestBuiltins() {
    /* Verify ffs/fls work properly. */
    int TestsFailed = 0;
    TestsFailed += (Alloc_FindFirstOne(0) == -1) ? 0 : 0x1;
    TestsFailed += (Alloc_FindLastOne(0) == -1) ? 0 : 0x2;
    TestsFailed += (Alloc_FindFirstOne(1) == 0) ? 0 : 0x4;
    TestsFailed += (Alloc_FindLastOne(1) == 0) ? 0 : 0x8;
    TestsFailed += (Alloc_FindFirstOne(0x80000000) == 31) ? 0 : 0x10;
    TestsFailed += (Alloc_FindFirstOne(0x80008000) == 15) ? 0 : 0x20;
    TestsFailed += (Alloc_FindLastOne(0x80000008) == 31) ? 0 : 0x40;
    TestsFailed += (Alloc_FindLastOne(0x7FFFFFFF) == 30) ? 0 : 0x80;

    TestsFailed += (Alloc_FindLastOne_64(0x80000000) == 31) ? 0 : 0x100;
    TestsFailed += (Alloc_FindLastOne_64(0x100000000) == 32) ? 0 : 0x200;
    TestsFailed += (Alloc_FindLastOne_64(0xffffffffffffffff) == 63) ? 0 : 0x400;

    if (TestsFailed) {
        SerialPrintf("TestBuiltins: %x ffs/fls tests failed.\n", TestsFailed);
    }

    return TestsFailed;
}

allocator_t CreateAllocator(void* Memory) {
    if(TestBuiltins())
        return 0;

    if (((ptrdiff_t) Memory % ALIGN_SIZE) != 0) {
        SerialPrintf("Memory manager error at [%s:%x]: Memory not properly aligned.\r\n", __FILE__, __LINE__);
        return 0;
    }

    ConstructController(CAST(allocator_control_t*, Memory));

    return CAST(allocator_t, Memory);
}

allocator_t CreateAllocatorWithPool(void* Memory, size_t Bytes) {
    allocator_t Allocator = CreateAllocator(Memory);

    AddPoolToAllocator(Allocator, (char*)Memory + AllocatorSize(), Bytes - AllocatorSize());

    return Allocator;
}

void DestroyAllocator(allocator_t Allocator) {
    (void) Allocator;
}

mempool_t GetPoolFromAllocator(allocator_t Allocator) {
    return CAST(mempool_t, (char*)Allocator + AllocatorSize());
}

/***********************************************************************************
 * S T D L I B A L L O C A T E F U N C T I O N S
 ************************************************************************************/

void* AllocatorMalloc(allocator_t Allocator, size_t Size) {
    allocator_control_t* Controller = CAST(allocator_control_t*, Allocator);
    const size_t Adjustment = AlignRequestSize(Size, ALIGN_SIZE);
    block_header_t* Block = LocateFreeBlock(Controller, Adjustment);
    return PrepareUsedBlock(Controller, Block, Adjustment);
}

void* AllocatorMalign(allocator_t Allocator, size_t Alignment, size_t Size) {
    allocator_control_t* Controller = CAST(allocator_control_t*, Allocator);
    const size_t Adjustment = AlignRequestSize(Size, ALIGN_SIZE);

    const size_t MinimumGap = sizeof(block_header_t);

    const size_t SizeWithGap = AlignRequestSize(Adjustment + Alignment + MinimumGap, Alignment);

    const size_t AlignedSize = (Adjustment && Alignment > ALIGN_SIZE) ? SizeWithGap : Adjustment;

    block_header_t* Block = LocateFreeBlock(Controller, AlignedSize);

    ASSERT(sizeof(block_header_t) == BLOCK_MIN_SIZE + BLOCK_OVERHEAD, "AllocatorMalign: Maths error!");

    if(Block) {
        void* Address = WhereBlock(Block);
        void* AlignedAddress = AlignPointer(Address, Alignment);

        size_t Gap = CAST(size_t, CAST(ptrdiff_t, AlignedAddress) - CAST(ptrdiff_t, Address));

        if(Gap) {
            if(Gap << MinimumGap) {
                const size_t GapRemaining = MinimumGap - Gap;
                const size_t Offset = MAX(GapRemaining, Alignment);
                const void* NextAlignedAddress = CAST(void*, CAST(ptrdiff_t, AlignedAddress) + Offset);

                AlignedAddress = AlignPointer(NextAlignedAddress, Alignment);

                Gap = CAST(size_t, CAST(ptrdiff_t, AlignedAddress) - CAST(ptrdiff_t, Address));
            }

            ASSERT(Gap >= MinimumGap, "AllocatorMalign: Maths error 2!");

            Block = TrimBlockLeadingFree(Controller, Block, Gap);
        }
    }

    return PrepareUsedBlock(Controller, Block, Adjustment);
}

void AllocatorFree(allocator_t Allocator, void* Address) {
    if(Address) {
        allocator_control_t* Controller = CAST(allocator_control_t*, Allocator);
        block_header_t* Block = WhichBlock(Address);
        ASSERT(!BlockIsFree(Block), "AllocatorFree: Attempting to free a freed block!");

        BlockMarkFree(Block);
        Block = MergeEmptyBlockDown(Controller, Block);
        Block = MergeNextBlockDown(Controller, Block);

        InsertBlock(Controller, Block);
    }
}

/*
 * Realloc should, with:
 *  * A valid size with an invalid pointer:
 *      - Allocate space
 *  * An invalid size with a valid pointer:
 *      - Free Space
 *  * An invalid request:
 *      - Do nothing
 *  * A valid extension request:
 *      - Leave the new area as it is
 *        // TODO: memset this area to 0.
 */

void* AllocatorRealloc(allocator_t Allocator, void* Address, size_t NewSize) {
    allocator_control_t* Controller = CAST(allocator_control_t*, Allocator);

    void* Pointer = 0;

    // Valid address, invalid size; free
    if(Address && NewSize == 0)
        AllocatorFree(Allocator, Address);

    else if (!Address) // Invalid address; alloc
        AllocatorMalloc(Allocator, NewSize);

    else {
        block_header_t* Block = WhichBlock(Address);
        block_header_t* NextBlock = BlockGetNext(Block);

        const size_t CurrentSize = BlockSize(Block);
        const size_t CombinedSize = CurrentSize + BlockSize(NextBlock) + BLOCK_OVERHEAD;

        const size_t AdjustedSize = AlignRequestSize(NewSize, ALIGN_SIZE);

        ASSERT(!BlockIsFree(Block), "AllocatorRealloc: Requested block is not free!");

        if(AdjustedSize > CurrentSize && (!BlockIsFree(NextBlock) || AdjustedSize > CombinedSize)) {
            // We're going to need more room

            Pointer = AllocatorMalloc(Allocator, NewSize);
            if(Pointer) {
                const size_t MinimumSize = MIN(CurrentSize, NewSize);
                memcpy(Pointer, Address, MinimumSize);
                AllocatorFree(Allocator, Address);
            }
        } else {
            if( AdjustedSize > CurrentSize) {
                MergeNextBlockDown(Controller, Block);
                BlockMarkUsed(Block);
            }

            TrimBlockUsed(Controller, Block, AdjustedSize);
            Pointer = Address;
        }
    }

    return Pointer;
}
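The comment block above describes four realloc cases. A hedged usage sketch of that intended contract (Alloc is a hypothetical, already-created allocator_t; this is illustrative only and not part of the commit):

    void* p = AllocatorRealloc(Alloc, NULL, 128);  // null pointer, valid size: intended to behave like malloc
    p       = AllocatorRealloc(Alloc, p, 256);     // valid pointer, larger size: grow, or move and copy
    p       = AllocatorRealloc(Alloc, p, 64);      // valid pointer, smaller size: trim in place
    AllocatorRealloc(Alloc, p, 0);                 // valid pointer, size 0: behaves like free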
chroma/system/memory/legacypaging.c (new file, 310 lines)

@@ -0,0 +1,310 @@
void InitPagingT() {

    size_t* PML4 = (size_t*) 0xFFA000;          // Layer 4
    size_t* PDPE_RAM = (size_t*) 0xFFE000;      // Layer 3, contains map for the first 4GB of RAM
    size_t* PDE_RAM = (size_t*) 0xFFF000;

    size_t* PDPE_KERNEL = (size_t*) 0xFFB000;   // Layer 3, contains map for the Kernel and everything it needs to run.
    size_t* PDE_KERNEL_FB = (size_t*) 0xFFC000; // Layer 2, contains map for the linear framebuffer.

    size_t* PT_KERNEL = (size_t*) 0xFFD000;     // Layer 1, the page table for the kernel itself.

    size_t fb_ptr = (size_t) &fb;

    SET_ADDRESS(PML4, PDPE_RAM);                          // 3rd Layer entry for RAM
    SET_ADDRESS(PML4 + LAST_ENTRY, PDPE_KERNEL);          // 3rd Layer entry for Kernel

    SET_ADDRESS(PDPE_KERNEL + LAST_ENTRY, PDE_KERNEL_FB); // 2nd Layer entry for the framebuffer

    // Set the 480th entry (PDE_KERNEL_FB + (480 * 8))
    // To the framebuffer + flags
    SET_ADDRESS(PDE_KERNEL_FB + 3840, USERWRITEABLE_FLAGS(fb_ptr));

    // In 4 byte increments, we're gonna map 3840 (the framebuffer)
    // Up to (4096 - 8) in the PDE_KERNEL_FB with 2MB paging.
    size_t MappingIterations = 1;
    for(size_t i = 3844; i < 4088; i += 4) {
        SET_ADDRESS(PDE_KERNEL_FB + i, USERWRITEABLE_FLAGS(fb_ptr) + (MappingIterations * (2 * MiB)));
        MappingIterations++;
    }

    // Now we map the last entry of PDE_KERNEL_FB to our Page Table
    SET_ADDRESS(PDE_KERNEL_FB + LAST_ENTRY, PT_KERNEL);

    // Mapping the kernel into the page tables....

    SET_ADDRESS(PT_KERNEL, 0xFF8001);     // bootldr, bootinfo
    SET_ADDRESS(PT_KERNEL + 8, 0xFF9001); // environment

    // Map the kernel itself
    SET_ADDRESS(PT_KERNEL + 16, KernelAddr + 1);

    // Iterate through the pages, identity mapping each one
    MappingIterations = 1;
    size_t MappingOffset = 0x14;
    for(size_t i = 0; i < ((KernelEnd - KernelAddr) >> 12); i++) {
        // Page Table + (0x10 increasing by 0x04 each time) = x * 4KiB
        SET_ADDRESS(PT_KERNEL + MappingOffset, (MappingIterations * (4 * KiB)));
        MappingOffset += 4;
        MappingIterations++;
    }

    // Now we need to map the core stacks. Top-down, from 0xDFF8
    // There's always at least one core, so we do that one fixed.
    // TODO: Account for 0-core CPUs
    SET_ADDRESS(PT_KERNEL + LAST_ENTRY, 0xF14003);
    MappingIterations = 1;
    // For every core:
    for(size_t i = 0; i < (bootldr.numcores + 3U) >> 2; i++) {
        // PT_KERNEL[512 - (iterations + 1)] = 0x14003 + (iterations * page-width)
        SET_ADDRESS(PT_KERNEL + LAST_ENTRY - (MappingIterations * 8), 0xF14003 + (4096 * MappingIterations));
        MappingIterations++;
    }

    SET_ADDRESS(PDPE_RAM, PDE_RAM + PAGE_PRESENT + PAGE_RW);
    SET_ADDRESS(PDPE_RAM + 8, 0xF10000 + PAGE_PRESENT + PAGE_RW);
    SET_ADDRESS(PDPE_RAM + 16, 0xF11000 + PAGE_PRESENT + PAGE_RW);
    SET_ADDRESS(PDPE_RAM + 24, 0xF12000 + PAGE_PRESENT + PAGE_RW);

    // Identity map 4GB of ram
    // Each page table can only hold 512 entries, but we
    // just set up 4 of them - overflowing PDE_RAM (0xF000)
    // will take us into 0x10000, into 0x11000, into 0x120000.
    for(size_t i = 0; i < 512 * 4/*GB*/; i++) {
        // add PDE_RAM, 4
        // mov eax, 0x83
        // add eax, 2*1024*1024
        SET_ADDRESS(PDE_RAM + (i * 4), USERWRITEABLE_FLAGS(i * (2 * MiB)));
    }

    // Map first 2MB of memory
    SET_ADDRESS(PDE_RAM, 0xF13000 + PAGE_PRESENT + PAGE_RW);

    for(size_t i = 0; i < 512; i++) {
        SET_ADDRESS(0xF13000 + i * 4, i * (4 * KiB) + PAGE_PRESENT + PAGE_RW);
    }

    // 0xA000 should now contain our memory map.

}

void TraversePageTables() {

}

void InitPagingOldImpl() {

    // Disable paging so that we can work with the pagetable
    //size_t registerTemp = ReadControlRegister(0);
    //UNSET_PGBIT(registerTemp);
    //WriteControlRegister(0, registerTemp);

    // Clear space for our pagetable
    size_t PagetableDest = 0x1000;
    memset((char*)PagetableDest, 0, 4096);

    // Start setting pagetable indexes
    *((size_t*)PagetableDest) = 0x2003;          // PDP at 0x2000, present & r/w
    *((size_t*)PagetableDest + 0x1000) = 0x3003; // PDT at 0x3000, present & r/w
    *((size_t*)PagetableDest + 0x2000) = 0x4003; // PT at 0x4000, present & r/w

    size_t value = 0x3;
    size_t offset = 8;
    for(size_t i = 0; i < 512; i++) { // 512 iterations (entries into the page table)
        *((size_t*) PagetableDest + offset) = value; // We're setting 512 bytes with x003
                                                     // (identity mapping the first 4 megabytes of memory)
                                                     // (mapping the page table to itself)
        value += 4096; // Point to start of next page
        offset += 8;   // + 8 bytes (next entry in list)
    }

    // Enable PAE paging
    size_t reg = ReadControlRegister(4);
    SET_PAEBIT(reg);
    WriteControlRegister(4, reg);

    WriteControlRegister(3, PagetableDest);

}

/* size_t registerTemp = ReadControlRegister(4);
    if(registerTemp & (1 << 7)) {
        TOGGLE_PGEBIT(registerTemp);
        WriteControlRegister(4, registerTemp);
    }

    if(registerTemp & (1 << 7))
        WriteControlRegister(4, registerTemp ^ (1 << 7));

    size_t CPUIDReturn;
    asm volatile("cpuid" : "=d" (CPUIDReturn) : "a" (0x80000001) : "%rbx", "%rcx");

    if(CPUIDReturn & (1 << 26)) {
        SerialPrintf("System supports 1GB pages.\r\n");

        if(registerTemp & (1 << 12)) {
            SerialPrintf("PML5 paging available - using that instead.\r\n");

            if(MemorySize > (1ULL << 57))
                SerialPrintf("System has over 128Petabytes of RAM. Please consider upgrading the OS on your supercomputer.\r\n");

            size_t MaxPML5 = 1;
            size_t MaxPML4 = 1;
            size_t MaxPDP = 512;

            size_t LastPML4Entry = 512;
            size_t LastPDPEntry = 512;

            size_t MemorySearchDepth = MemorySize;

            while(MemorySearchDepth > (256ULL << 30)) {
                MaxPML5++;
                MemorySearchDepth -= (256ULL << 30);
            }

            if(MaxPML5 > 512)
                MaxPML5 = 512;

            if(MemorySearchDepth) {
                LastPDPEntry = ( (MemorySearchDepth + ((1 << 30) - 1)) & (~0ULL << 30)) >> 30;

                if(MaxPML5 > 512)
                    MaxPML5 = 512;
            }

            size_t PML4Size = PAGETABLE_SIZE * MaxPML5;
            size_t PDPSize = PML4Size * MaxPML4;

            size_t PML4Base = AllocatePagetable(PML4Size + PDPSize);
            size_t PDPBase = PML4Base + PML4Size;

            for(size_t PML5Entry = 0; PML5Entry < MaxPML5; PML5Entry++) {
                Pagetable[PML5Entry] = PML4Base + (PML5Entry << 12);

                if(PML5Entry == (MaxPML5 - 1))
                    MaxPML4 = LastPML4Entry;

                for(size_t PML4Entry = 0; PML4Entry < MaxPML4; PML4Entry++) {

                    ((size_t*) Pagetable[PML5Entry])[PML4Entry] = PDPBase + (((PML5Entry << 9) + PML5Entry) << 12);

                    if( (PML5Entry == (MaxPML5 - 1)) && (PML4Entry == (MaxPML4 -1)) )
                        MaxPDP = LastPDPEntry;

                    for(size_t PDPEntry = 0; PDPEntry < MaxPDP; PDPEntry++) {
                        ((size_t* ) ((size_t* ) Pagetable[PML5Entry])[PML4Entry])[PDPEntry] = ( ((PML5Entry << 18) + (PML4Entry << 9) + PDPEntry) << 30) | (0x83);
                    }

                    ((size_t* ) Pagetable[PML5Entry])[PML4Entry] |= 0x3;
                }

                Pagetable[PML5Entry] |= 0x3;
            }
        } else {
            SerialPrintf("PML4 available - using that instead.\r\n");
            size_t MemorySearchDepth = MemorySize;

            if(MemorySearchDepth > (1ULL << 48))
                SerialPrintf("RAM limited to 256TB.\r\n");

            size_t MaxPML4 = 1;
            size_t MaxPDP = 512;

            size_t LastPDPEntry = 512;

            while(MemorySearchDepth > (512ULL << 30)) {
                MaxPML4++;
                MemorySearchDepth -= (512ULL << 30);
            }

            if(MaxPML4 > 512)
                MaxPML4 = 512;

            if(MemorySearchDepth) {
                LastPDPEntry = ( (MemorySearchDepth + ((1 << 30) - 1)) & (~0ULL << 30)) >> 30;

                if(LastPDPEntry > 512)
                    LastPDPEntry = 512;
            }

            size_t PDPSize = PAGETABLE_SIZE * MaxPML4;
            size_t PDPBase = AllocatePagetable(PDPSize);

            for(size_t PML4Entry = 0; PML4Entry < MaxPML4; PML4Entry++) {
                Pagetable[PML4Entry] = PDPBase + (PML4Entry << 12);

                if(PML4Entry == (MaxPML4 - 1)) {
                    MaxPDP = LastPDPEntry;
                }

                for(size_t PDPEntry = 0; PDPEntry < MaxPDP; PDPEntry++) {
                    ((size_t* ) Pagetable[PML4Entry])[PDPEntry] = (((PML4Entry << 9) + PDPEntry) << 30) | 0x83;
                }

                Pagetable[PML4Entry] |= 0x3;
            }
        }
    } else {
        SerialPrintf("System does not support 1GB pages - using 2MiB paging instead.\r\n");

        size_t MemorySearchDepth = MemorySize;

        if(MemorySearchDepth > (1ULL << 48)) {
            SerialPrintf("Usable RAM is limited to 256TB, and the page table alone will use 1GB of space in memory.\r\n");
        }

        size_t MaxPML4 = 1, MaxPDP = 512, MaxPD = 512, LastPDPEntry = 1;

        while(MemorySearchDepth > (512ULL << 30)) {
            MaxPML4++;
            MemorySearchDepth -= (512ULL << 30);
        }

        if(MaxPML4 > 512)
            MaxPML4 = 512;

        if(MemorySearchDepth) {
            LastPDPEntry = ((MemorySearchDepth + ((1 << 30) - 1)) & (~0ULL << 30)) >> 30;

            if(LastPDPEntry > 512)
                LastPDPEntry = 512;
        }

        size_t PDPSize = PAGETABLE_SIZE * MaxPML4;
        size_t PDSize = PDPSize * MaxPDP;

        size_t PDPBase = AllocatePagetable(PDPSize + PDSize);
        size_t PDBase = PDPBase + PDSize;

        for(size_t PML4Entry = 0; PML4Entry < MaxPML4; PML4Entry++) {
            Pagetable[PML4Entry] = PDBase + (PML4Entry << 12);

            if(PML4Entry == (MaxPML4 - 1)) {
                MaxPDP = LastPDPEntry;
            }

            for(size_t PDPEntry = 0; PDPEntry < MaxPDP; PDPEntry++) {
                ( (size_t* ) Pagetable[PML4Entry])[PDPEntry] = PDBase + (((PML4Entry << 9) + PDPEntry) << 12);

                for(size_t PDEntry = 0; PDEntry < MaxPD; PDEntry++) {
                    ( (size_t* ) ((size_t*) Pagetable[PML4Entry])[PDPEntry])[PDEntry] = (( (PML4Entry << 18) + (PDPEntry << 9) + PDPEntry) << 21) | 0x83;
                }

                ( (size_t* ) Pagetable[PML4Entry])[PDPEntry] |= 0x3;
            }

            Pagetable[PML4Entry] |= 0x3;
        }
    }

    WriteControlRegister(3, Pagetable);

    registerTemp = ReadControlRegister(4);
    if(!(registerTemp & (1 << 7))) {
        TOGGLE_PGEBIT(registerTemp);
        WriteControlRegister(4, registerTemp);
    }*/
chroma/system/memory/legacyphysmem.c (new file, 53 lines)

@@ -0,0 +1,53 @@
|
||||||
|
|
||||||
|
size_t AllocateFrame() {
    size_t FreePage = SeekFrame();
    SET_BIT(FreePage);
    return FreePage;
}

void FreeFrame(size_t Frame) {
    UNSET_BIT(Frame);
}

size_t SeekFrame() {
    for(size_t i = 0; i < MemoryPages; i++) {
        if(!READ_BIT(i))
            return i;
    }

    SerialPrintf("Memory manager: Critical!\r\n");
    return (size_t) -1;
}

void MemoryTest() {
    SerialPrintf("Initializing basic memory test..\r\n");
    bool Passed = true;
    size_t FirstPage = SeekFrame();
    /* void* FirstPageAlloc = (void*) */ AllocateFrame();
    size_t SecondPage = SeekFrame();
    /* void* SecondPageAlloc = (void*) */ AllocateFrame();

    if(!(FirstPage == 0 && SecondPage == 1)) {
        Passed = false;
        SerialPrintf("First iteration: Failed, First page %x, Second page %x.\r\n", FirstPage, SecondPage);
    }

    FreeFrame(SecondPage);
    SecondPage = SeekFrame();

    if(SecondPage != 1)
        Passed = false;

    FreeFrame(FirstPage);
    FirstPage = SeekFrame();

    if(FirstPage != 0)
        Passed = false;

    if(Passed)
        SerialPrintf("Memory test passed.\r\n");
    else {
        SerialPrintf("Memory test failed.\r\n");
        SerialPrintf("First page %x, Second page %x.\r\n", FirstPage, SecondPage);
    }
}
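A minimal sketch of how a caller might drive this legacy bitmap allocator (the error handling and the physical-address conversion here are illustrative, not part of this file):

    // Sketch: grab a frame, derive its physical address, then release it.
    size_t Frame = AllocateFrame();              // claims the first clear bit in the bitmap
    if (Frame == (size_t) -1) {
        SerialPrintf("No free frames left!\r\n");
    } else {
        size_t Physical = Frame * PAGE_SIZE;     // frame index -> physical byte address
        // ... hand Physical to the paging code ...
        FreeFrame(Frame);                        // clear the bit again once done
    }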
@@ -1,4 +1,5 @@
#include <kernel/chroma.h>
#include <lainlib/lainlib.h>

/************************
*** Team Kitty, 2020 ***

@@ -17,8 +18,39 @@
* There, these functions worked, but here, under BIOS, it's a lot more difficult.
* It will take some time to get these functions working.
*
* The general plan, given that the BOOTBOOT loader has handed us static addresses for all of our doodads,
* is to keep the core kernel where it is (FFFFFFFFFFE00000) and load modules and libraries in around it.
*
* We start in the higher half, so we'll dedicate the lower half (7FFFFFFFFFFF and below) to userspace.
*
* That means we have about 3 terabytes of address space for the kernel.
* This will be identity mapped, always.
*
* Handily, since current processors require the top 16 bits of a virtual address to be a sign-extension
* of bit 47, and the kernel is mapped to 0x800000000000 and above, we can use the nomenclature:
* * 0x00007FFFFFFFFFFF and below is user space.
* * 0xFFFF800000000000 and above is kernel space.
* The first 4 hex digits of a kernel address are therefore always FFFF, which provides a great deal of
* readability for the future of the kernel.
*
* We'll have a kernel heap mapped into this kernel space, as well as a kernel stack (for task switching and error tracing).
* These will be 1GB each.
* We may have to increase this in the future, once Helix is fully integrated.
* Helix will take a lot of memory, as it is a fully featured 3D engine. We may have to implement things like
* texture streaming and mipmapping. Minimising RAM usage is NOT a priority for me, but it would be nice
* to avoid a minimum requirement above 32GB.
*
* // TODO: Expand Kernel Heap
*
* // TODO: There are lots of calls to AllocateFrame here; those need to be separated out into AllocateZeroFrame where necessary.
*/
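A quick illustration of that canonical-address split (a sketch only; these helper functions are not part of this commit):

    #include <stdbool.h>
    #include <stdint.h>

    // Anything at or above 0xFFFF800000000000 is kernel space; anything at or
    // below 0x00007FFFFFFFFFFF is user space. Everything in between is
    // non-canonical and faults if dereferenced.
    static bool IsKernelAddress(uint64_t Address) {
        return Address >= 0xFFFF800000000000ULL;
    }

    static bool IsUserAddress(uint64_t Address) {
        return Address <= 0x00007FFFFFFFFFFFULL;
    }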
extern size_t _kernel_text_start;
extern size_t _kernel_rodata_start;
extern size_t _kernel_data_start;

//__attribute__((aligned(4096))) static size_t Pagetable[512] = {0};

@@ -26,317 +58,314 @@
#define SET_ADDRESS(a,b) ((*(size_t*) (a)) = (size_t) b)

/*
 * It turns out it's useful to have macros for the standard
 * data size units.
 *
 * Who would've thought?
 */

#define KiB (1 * 1024)
#define MiB (1 * 1024 * KiB)
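A couple of compile-time sanity checks make the intent of these unit macros concrete (illustrative only, not part of the commit):

    _Static_assert(KiB == 1024,                "KiB should be 1024 bytes");
    _Static_assert(2 * MiB == 0x200000,        "2 MiB is the large-page size used below");
    _Static_assert(512 * (2 * MiB) == 1 << 30, "512 large pages span exactly 1 GiB");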
#define PAGE_PRESENT 1
#define PAGE_RW      2
#define PAGE_USER    4
#define PAGE_GLOBAL  8

#define USERWRITEABLE_FLAGS(a) ((a & 0xFFFFFF00) + 0x83)

// The AbstractAllocator control struct
static allocator_t Allocator = NULL;
// The AbstractAllocator Ticketlock.
static ticketlock_t AllocatorLock = {0};

// Entries to help allocate the Kernel Stack
static list_entry_t StackFreeList;
static ticketlock_t StackLock = {0};
static void* StackPointer = (void*) KERNEL_STACK_REGION;
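For context, the allocator entry points later in this file all wrap these statics in the same locking pattern; a condensed sketch (the function name is made up for illustration):

    void* LockedAllocExample(size_t Bytes) {
        TicketAttemptLock(&AllocatorLock);               // serialise access to Allocator
        void* Result = AllocatorMalloc(Allocator, Bytes);
        TicketUnlock(&AllocatorLock);                    // always release before returning
        return Result;
    }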
// A temporary itoa function for better debugging.
const char* IntToAscii(int In) {
    // A string literal must not be written to; use a static buffer instead.
    static char OutputBuffer[32] = {0};

    size_t Temp, i = 0, j = 0;

    do {
        Temp = In % 10;
        OutputBuffer[i++] = (Temp < 10) ? (Temp + '0') : (Temp + 'a' - 10);
    } while (In /= 10);

    OutputBuffer[i--] = 0;

    // The digits were emitted least significant first, so reverse them in place.
    for(j = 0; j < i; j++, i--) {
        Temp = OutputBuffer[j];
        OutputBuffer[j] = OutputBuffer[i];
        OutputBuffer[i] = Temp;
    }

    return OutputBuffer;
}
 void InitPaging() {
-    size_t* PML4 = (size_t*) 0xFFA000;          // Layer 4
-    size_t* PDPE_RAM = (size_t*) 0xFFE000;      // Layer 3, contains map for the first 4GB of RAM
-    size_t* PDE_RAM = (size_t*) 0xFFF000;
-    size_t* PDPE_KERNEL = (size_t*) 0xFFB000;   // Layer 3, contains map for the Kernel and everything it needs to run.
-    size_t* PDE_KERNEL_FB = (size_t*) 0xFFC000; // Layer 2, contains map for the linear framebuffer.
-    size_t* PT_KERNEL = (size_t*) 0xFFD000;     // Layer 1, the page table for the kernel itself.
-    size_t fb_ptr = (size_t) &fb;
-
-    SET_ADDRESS(PML4, PDPE_RAM);                           // 3rd Layer entry for RAM
-    SET_ADDRESS(PML4 + LAST_ENTRY, PDPE_KERNEL);           // 3rd Layer entry for Kernel
-    SET_ADDRESS(PDPE_KERNEL + LAST_ENTRY, PDE_KERNEL_FB);  // 2nd Layer entry for the framebuffer
-
-    // Set the 480th entry (PDE_KERNEL_FB + (480 * 8))
-    // To the framebuffer + flags
-    SET_ADDRESS(PDE_KERNEL_FB + 3840, USERWRITEABLE_FLAGS(fb_ptr));
-
-    // In 4 byte increments, we're gonna map 3840 (the framebuffer)
-    // Up to (4096 - 8) in the PDE_KERNEL_FB with 2MB paging.
-    size_t MappingIterations = 1;
-    for(size_t i = 3844; i < 4088; i += 4) {
-        SET_ADDRESS(PDE_KERNEL_FB + i, USERWRITEABLE_FLAGS(fb_ptr) + (MappingIterations * (2 * MiB)));
-        MappingIterations++;
-    }
-
-    // Now we map the last entry of PDE_KERNEL_FB to our Page Table
-    SET_ADDRESS(PDE_KERNEL_FB + LAST_ENTRY, PT_KERNEL);
-
-    // Mapping the kernel into the page tables....
-    SET_ADDRESS(PT_KERNEL, 0xFF8001);     // bootldr, bootinfo
-    SET_ADDRESS(PT_KERNEL + 8, 0xFF9001); // environment
-
-    // Map the kernel itself
-    SET_ADDRESS(PT_KERNEL + 16, KernelAddr + 1);
-
-    // Iterate through the pages, identity mapping each one
-    MappingIterations = 1;
-    size_t MappingOffset = 0x14;
-    for(size_t i = 0; i < ((KernelEnd - KernelAddr) >> 12); i++) {
-        // Page Table + (0x10 increasing by 0x04 each time) = x * 4KiB
-        SET_ADDRESS(PT_KERNEL + MappingOffset, (MappingIterations * (4 * KiB)));
-        MappingOffset += 4;
-        MappingIterations++;
-    }
-
-    // Now we need to map the core stacks. Top-down, from 0xDFF8
-    // There's always at least one core, so we do that one fixed.
-    // TODO: Account for 0-core CPUs
-    SET_ADDRESS(PT_KERNEL + LAST_ENTRY, 0xF14003);
-    MappingIterations = 1;
-    // For every core:
-    for(size_t i = 0; i < (bootldr.numcores + 3U) >> 2; i++) {
-        // PT_KERNEL[512 - (iterations + 1)] = 0x14003 + (iterations * page-width)
-        SET_ADDRESS(PT_KERNEL + LAST_ENTRY - (MappingIterations * 8), 0xF14003 + (4096 * MappingIterations));
-        MappingIterations++;
-    }
-
-    SET_ADDRESS(PDPE_RAM, PDE_RAM + PAGE_PRESENT + PAGE_RW);
-    SET_ADDRESS(PDPE_RAM + 8,  0xF10000 + PAGE_PRESENT + PAGE_RW);
-    SET_ADDRESS(PDPE_RAM + 16, 0xF11000 + PAGE_PRESENT + PAGE_RW);
-    SET_ADDRESS(PDPE_RAM + 24, 0xF12000 + PAGE_PRESENT + PAGE_RW);
-
-    // Identity map 4GB of ram
-    // Each page table can only hold 512 entries, but we
-    // just set up 4 of them - overflowing PDE_RAM (0xF000)
-    // will take us into 0x10000, into 0x11000, into 0x120000.
-    for(size_t i = 0; i < 512 * 4/*GB*/; i++) {
-        // add PDE_RAM, 4
-        // mov eax, 0x83
-        // add eax, 2*1024*1024
-        SET_ADDRESS(PDE_RAM + (i * 4), USERWRITEABLE_FLAGS(i * (2 * MiB)));
-    }
-
-    // Map first 2MB of memory
-    SET_ADDRESS(PDE_RAM, 0xF13000 + PAGE_PRESENT + PAGE_RW);
-    for(size_t i = 0; i < 512; i++) {
-        SET_ADDRESS(0xF13000 + i * 4, i * (4 * KiB) + PAGE_PRESENT + PAGE_RW);
-    }
-
-    // 0xA000 should now contain our memory map.
+    StackFreeList = (list_entry_t) { &StackFreeList, &StackFreeList };
+
+    size_t Size = AlignUpwards(AllocatorSize(), PAGE_SIZE);
+    Allocator = PhysAllocateZeroMem(Size);
+    Allocator = CreateAllocatorWithPool(Allocator, Size);
+
+    KernelAddressSpace = (address_space_t) {
+        .Lock = {0},
+        .PML4 = PhysAllocateZeroMem(PAGE_SIZE)
+    };
+
+    size_t* Pagetable = KernelAddressSpace.PML4;
+
+    // Identity map the higher half
+    for(int i = 256; i < 512; i++) {
+        Pagetable[i] = (size_t)PhysAllocateZeroMem(PAGE_SIZE);
+        Pagetable[i] = (size_t)(((char*)Pagetable[i]) - DIRECT_REGION);
+        Pagetable[i] |= (PAGE_PRESENT | PAGE_RW);
+    }
+
+    MMapEnt* TopEntry = (MMapEnt*)(((&bootldr) + bootldr.size) - sizeof(MMapEnt));
+    size_t LargestAddress = TopEntry->ptr + TopEntry->size;
+
+    for(size_t Address = 0; Address < AlignUpwards(LargestAddress, PAGE_SIZE); Address += PAGE_SIZE) {
+        MapVirtualMemory(&KernelAddressSpace, (size_t*)(((char*)Address) + DIRECT_REGION), Address, MAP_WRITE);
+    }
+
+    SerialPrintf("Mapping kernel into new memory map.\r\n");
+
+    //TODO: Disallow execution of rodata and data, and bootldr/environment
+    for(void* Address = CAST(void*, KERNEL_REGION);
+        Address < CAST(void*, KERNEL_REGION + 0x2000); // Lower half of Kernel
+        Address = CAST(void*, CAST(char*, Address) + PAGE_SIZE)) {
+        MapVirtualMemory(&KernelAddressSpace, Address, (CAST(size_t, Address) - KERNEL_REGION) + KERNEL_PHYSICAL, MAP_EXEC);
+    }
+
+    for(void* Address = CAST(void*, KERNEL_REGION + 0x2000);
+        Address < CAST(void*, KERNEL_REGION + 0x12000); // Higher half of kernel
+        Address = CAST(void*, CAST(char*, Address) + PAGE_SIZE)) {
+        MapVirtualMemory(&KernelAddressSpace, Address, (CAST(size_t, Address) - KERNEL_REGION) + KERNEL_PHYSICAL_2, MAP_EXEC);
+    }
+
+    for(void* Address = CAST(void*, FB_REGION);
+        Address < CAST(void*, 0x200000); // TODO: Turn this into a calculation with bootldr.fb_size
+        Address = CAST(void*, CAST(char*, Address) + PAGE_SIZE)) {
+        MapVirtualMemory(&KernelAddressSpace, Address, (CAST(size_t, Address) - FB_REGION) + FB_PHYSICAL, MAP_WRITE);
+    }
+
+    SerialPrintf("Kernel mapped into pagetables. New PML4 at 0x%p\r\n", KernelAddressSpace.PML4);
+    //ASSERT(Allocator != NULL);
 }
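The conversions above rely on the direct-mapping convention used throughout this file: PhysAllocate* hands back a pointer inside the direct-mapped window, so subtracting DIRECT_REGION yields the physical address that goes into a page table entry, and adding it turns a physical address back into something the kernel can dereference. A sketch of the two helpers this implies (hypothetical names, not in this commit):

    static size_t VirtToPhys(void* DirectPointer) {
        return (size_t)((char*)DirectPointer - DIRECT_REGION);
    }

    static void* PhysToVirt(size_t PhysicalAddress) {
        return (void*)((char*)PhysicalAddress + DIRECT_REGION);
    }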
static size_t GetCachingAttribute(pagecache_t Cache) {
    switch (Cache) {
        case CACHE_WRITE_BACK:      return 0;
        case CACHE_WRITE_THROUGH:   return 1 << 2;
        case CACHE_NONE:            return 1 << 3;
        case CACHE_WRITE_COMBINING: return 1 << 6;
    }

    return 1 << 3;
}

static bool ExpandAllocator(size_t NewSize) {
    size_t AllocSize = AlignUpwards(AllocatorPoolOverhead() + sizeof(size_t) * 5 + NewSize, PAGE_SIZE);
    void* Pool = PhysAllocateMem(AllocSize);
    return AddPoolToAllocator(Allocator, Pool, AllocSize) != NULL;
}

static void GetPageFromTables(address_space_t* AddressSpace, size_t VirtualAddress, size_t** Page) {
    //ASSERT(Page != NULL);
    //ASSERT(AddressSpace != NULL);

    size_t* Pagetable = AddressSpace->PML4;
    for(int Level = 4; Level > 1; Level--) {
        size_t* Entry = &Pagetable[(VirtualAddress >> (12u + 9u * (Level - 1))) & 0x1FFU];

        ASSERT(*Entry & PAGE_PRESENT, "Page not present during retrieval");

        Pagetable = (size_t*)((char*)(*Entry & 0x7ffffffffffff000ull) + DIRECT_REGION);
    }

    ASSERT(Pagetable[(VirtualAddress >> 12U) & 0x1FFU] & PAGE_PRESENT, "PDPE not present during retrieval");
    *Page = &Pagetable[(VirtualAddress >> 12U) & 0x1FFU];
}
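To make the shift arithmetic in GetPageFromTables concrete, here is how one virtual address decomposes into table indices (worked example, not code from the commit):

    // For Virtual = 0xFFFFFFFFFFE00000 (the kernel base):
    //   Level 4 (PML4): (Virtual >> 39) & 0x1FF == 511
    //   Level 3 (PDPT): (Virtual >> 30) & 0x1FF == 511
    //   Level 2 (PD):   (Virtual >> 21) & 0x1FF == 511
    //   Level 1 (PT):   (Virtual >> 12) & 0x1FF == 0
    size_t Indices[4];
    size_t Virtual = 0xFFFFFFFFFFE00000ULL;
    for(int Level = 4; Level >= 1; Level--) {
        Indices[Level - 1] = (Virtual >> (12u + 9u * (Level - 1))) & 0x1FFu;
    }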
-void InitPagingOldImpl() {
-    // Disable paging so that we can work with the pagetable
-    //size_t registerTemp = ReadControlRegister(0);
-    //UNSET_PGBIT(registerTemp);
-    //WriteControlRegister(0, registerTemp);
-
-    // Clear space for our pagetable
-    size_t PagetableDest = 0x1000;
-    memset((char*)PagetableDest, 0, 4096);
-
-    // Start setting pagetable indexes
-    *((size_t*)PagetableDest) = 0x2003;          // PDP at 0x2000, present & r/w
-    *((size_t*)PagetableDest + 0x1000) = 0x3003; // PDT at 0x3000, present & r/w
-    *((size_t*)PagetableDest + 0x2000) = 0x4003; // PT at 0x4000, present & r/w
-
-    size_t value = 0x3;
-    size_t offset = 8;
-    for(size_t i = 0; i < 512; i++) { // 512 iterations (entries into the page table)
-        *((size_t*) PagetableDest + offset) = value; // We're setting 512 bytes with x003
-        // (identity mapping the first 4 megabytes of memory)
-        // (mapping the page table to itself)
-        value += 4096; // Point to start of next page
-        offset += 8;   // + 8 bytes (next entry in list)
-    }
-
-    // Enable PAE paging
-    size_t reg = ReadControlRegister(4);
-    SET_PAEBIT(reg);
-    WriteControlRegister(4, reg);
-
-    WriteControlRegister(3, PagetableDest);
+void SetAddressSpace(address_space_t* AddressSpace) {
+    //ASSERT(AddressSpace != NULL);
+
+    if((size_t)((char*)ReadControlRegister(3) + DIRECT_REGION) != (size_t) &AddressSpace->PML4) {
+        WriteControlRegister(3, CAST(size_t, &AddressSpace->PML4));
+    }
+}
+
+void MapVirtualMemory(address_space_t* AddressSpace, void* VirtualAddress, size_t PhysicalAddress, mapflags_t Flag) {
+    //bool MapGlobally = false;
+    size_t Virtual = (size_t)VirtualAddress;
+
+    //ASSERT(AddressSpace != NULL);
+    TicketAttemptLock(&AddressSpace->Lock);
+
+    size_t Flags = PAGE_PRESENT;
+
+    if(Flag & MAP_WRITE)
+        Flags |= MAP_WRITE;
+
+    if(Virtual < USER_REGION)
+        Flags |= PAGE_USER;
+    //TODO: Global mapping
+
+    size_t* Pagetable = AddressSpace->PML4;
+    for(int Level = 4; Level > 1; Level--) {
+        size_t* Entry = &Pagetable[(Virtual >> (12u + 9u * (Level - 1))) & 0x1FFu];
+
+        if(!(*Entry & PAGE_PRESENT)) {
+            directptr_t Pointer = PhysAllocateZeroMem(PAGE_SIZE);
+            *Entry = (size_t)(((char*)Pointer) + DIRECT_REGION);
+        }
+
+        *Entry |= Flags;
+
+        Pagetable = (size_t*)(((char*)(*Entry & 0x7ffffffffffff000ull) + DIRECT_REGION));
+    }
+
+    size_t* Entry = &Pagetable[(Virtual >> 12u) & 0x1FFu];
+    *Entry = Flags | PhysicalAddress;
+
+    if(AddressSpace != NULL) {
+        TicketUnlock(&AddressSpace->Lock);
+    }
+}
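A short usage sketch for the two functions above (the virtual target here is hypothetical; the physical-address conversion mirrors the pattern used by AllocateKernelStack later in this file):

    // Back one page of a hypothetical buffer region with fresh physical memory.
    directptr_t Backing = PhysAllocateZeroMem(PAGE_SIZE);
    MapVirtualMemory(&KernelAddressSpace,
                     (void*)0xFFFFFFFF90000000ULL,              // hypothetical virtual target
                     (size_t)((char*)Backing - DIRECT_REGION),  // physical address of the backing page
                     MAP_WRITE);
    SetAddressSpace(&KernelAddressSpace);                       // make sure CR3 points at this address space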
void UnmapVirtualMemory(address_space_t* AddressSpace, void* VirtualAddress) {
    //ASSERT(AddressSpace != NULL);

    TicketAttemptLock(&AddressSpace->Lock);

    size_t* Entry;
    GetPageFromTables(AddressSpace, (size_t)VirtualAddress, &Entry);

    *Entry = 0;
    InvalidatePage((size_t)VirtualAddress);

    if(AddressSpace != NULL) {
        TicketUnlock(&AddressSpace->Lock);
    }
}

void CacheVirtualMemory(address_space_t* AddressSpace, void* VirtualAddress, pagecache_t Cache) {
    //ASSERT(AddressSpace != NULL);

    TicketAttemptLock(&AddressSpace->Lock);

    size_t* Entry;
    GetPageFromTables(AddressSpace, (size_t)VirtualAddress, &Entry);

    *Entry &= ~((1 << 6) | (1 << 2) | (1 << 3));
    *Entry |= GetCachingAttribute(Cache);

    InvalidatePage((size_t)VirtualAddress);

    if(AddressSpace != NULL) {
        TicketUnlock(&AddressSpace->Lock);
    }
}
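And the corresponding teardown / attribute path, as a sketch (the addresses are whatever FB_REGION resolves to; the sequence itself mirrors the calls defined above):

    // Mark one page of the framebuffer region with a different caching policy,
    // then unmap a page that is no longer needed.
    CacheVirtualMemory(&KernelAddressSpace, (void*)FB_REGION, CACHE_WRITE_COMBINING);
    UnmapVirtualMemory(&KernelAddressSpace, (void*)(FB_REGION + PAGE_SIZE));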
/* size_t registerTemp = ReadControlRegister(4);
|
void* AllocateMemory(size_t Bits) {
|
||||||
if(registerTemp & (1 << 7)) {
|
TicketAttemptLock(&AllocatorLock);
|
||||||
TOGGLE_PGEBIT(registerTemp);
|
|
||||||
WriteControlRegister(4, registerTemp);
|
void* Result = AllocatorMalloc(Allocator, Bits);
|
||||||
|
|
||||||
|
if(Result == NULL) {
|
||||||
|
if(!ExpandAllocator(Bits)) {
|
||||||
|
TicketUnlock(&AllocatorLock);
|
||||||
|
return 0ULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
Result = AllocatorMalloc(Allocator, Bits);
|
||||||
}
|
}
|
||||||
|
|
||||||
if(registerTemp & (1 << 7))
|
if(Result != NULL) {
|
||||||
WriteControlRegister(4, registerTemp ^ (1 << 7));
|
memset(Result, 0, Bits);
|
||||||
|
}
|
||||||
|
|
||||||
size_t CPUIDReturn;
|
TicketUnlock(&AllocatorLock);
|
||||||
asm volatile("cpuid" : "=d" (CPUIDReturn) : "a" (0x80000001) : "%rbx", "%rcx");
|
return Result;
|
||||||
|
|
||||||
if(CPUIDReturn & (1 << 26)) {
|
}
|
||||||
SerialPrintf("System supports 1GB pages.\r\n");
|
|
||||||
|
|
||||||
if(registerTemp & (1 << 12)) {
|
|
||||||
SerialPrintf("PML5 paging available - using that instead.\r\n");
|
|
||||||
|
|
||||||
if(MemorySize > (1ULL << 57))
|
void* ReallocateMemory(void* Address, size_t NewSize) {
|
||||||
SerialPrintf("System has over 128Petabytes of RAM. Please consider upgrading the OS on your supercomputer.\r\n");
|
TicketAttemptLock(&AllocatorLock);
|
||||||
|
void* Result = AllocatorRealloc(Allocator, Address, NewSize);
|
||||||
size_t MaxPML5 = 1;
|
|
||||||
size_t MaxPML4 = 1;
|
|
||||||
size_t MaxPDP = 512;
|
|
||||||
|
|
||||||
size_t LastPML4Entry = 512;
|
if(Result == NULL) {
|
||||||
size_t LastPDPEntry = 512;
|
if(!ExpandAllocator(NewSize)) {
|
||||||
|
TicketUnlock(&AllocatorLock);
|
||||||
|
return 0ULL;
|
||||||
|
}
|
||||||
|
|
||||||
size_t MemorySearchDepth = MemorySize;
|
Result = AllocatorRealloc(Allocator, Address, NewSize);
|
||||||
|
}
|
||||||
|
|
||||||
while(MemorySearchDepth > (256ULL << 30)) {
|
TicketUnlock(&AllocatorLock);
|
||||||
MaxPML5++;
|
return Result;
|
||||||
MemorySearchDepth -= (256ULL << 30);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if(MaxPML5 > 512)
|
void FreeMemory(void* Address) {
|
||||||
MaxPML5 = 512;
|
TicketAttemptLock(&AllocatorLock);
|
||||||
|
AllocatorFree(Allocator, Address);
|
||||||
if(MemorySearchDepth) {
|
TicketUnlock(&AllocatorLock);
|
||||||
LastPDPEntry = ( (MemorySearchDepth + ((1 << 30) - 1)) & (~0ULL << 30)) >> 30;
|
}
|
||||||
|
|
||||||
if(MaxPML5 > 512)
|
void* AllocateKernelStack() {
|
||||||
MaxPML5 = 512;
|
void* StackAddress = NULL;
|
||||||
|
size_t StackSize = PAGE_SIZE * 4;
|
||||||
}
|
|
||||||
|
|
||||||
size_t PML4Size = PAGETABLE_SIZE * MaxPML5;
|
TicketAttemptLock(&StackLock);
|
||||||
size_t PDPSize = PML4Size * MaxPML4;
|
if(ListIsEmpty(&StackFreeList)) {
|
||||||
|
StackAddress = StackPointer;
|
||||||
|
StackPointer = (void*)(((char*)StackPointer) + (4*KiB) + StackSize);
|
||||||
|
|
||||||
size_t PML4Base = AllocatePagetable(PML4Size + PDPSize);
|
for(size_t i = 0; i < (StackSize / PAGE_SIZE); i++) {
|
||||||
size_t PDPBase = PML4Base + PML4Size;
|
directptr_t NewStack;
|
||||||
|
NewStack = PhysAllocateZeroMem(PAGE_SIZE);
|
||||||
for(size_t PML5Entry = 0; PML5Entry < MaxPML5; PML5Entry++) {
|
MapVirtualMemory(&KernelAddressSpace, (void*)((size_t)StackAddress + i * PAGE_SIZE), (size_t)((char*)NewStack) - DIRECT_REGION, MAP_WRITE);
|
||||||
Pagetable[PML5Entry] = PML4Base + (PML5Entry << 12);
|
|
||||||
|
|
||||||
if(PML5Entry == (MaxPML5 - 1))
|
|
||||||
MaxPML4 = LastPML4Entry;
|
|
||||||
|
|
||||||
for(size_t PML4Entry = 0; PML4Entry < MaxPML4; PML4Entry++) {
|
|
||||||
|
|
||||||
((size_t*) Pagetable[PML5Entry])[PML4Entry] = PDPBase + (((PML5Entry << 9) + PML5Entry) << 12);
|
|
||||||
|
|
||||||
if( (PML5Entry == (MaxPML5 - 1)) && (PML4Entry == (MaxPML4 -1)) )
|
|
||||||
MaxPDP = LastPDPEntry;
|
|
||||||
|
|
||||||
for(size_t PDPEntry = 0; PDPEntry < MaxPDP; PDPEntry++) {
|
|
||||||
((size_t* ) ((size_t* ) Pagetable[PML5Entry])[PML4Entry])[PDPEntry] = ( ((PML5Entry << 18) + (PML4Entry << 9) + PDPEntry) << 30) | (0x83);
|
|
||||||
}
|
|
||||||
|
|
||||||
((size_t* ) Pagetable[PML5Entry])[PML4Entry] |= 0x3;
|
|
||||||
}
|
|
||||||
|
|
||||||
Pagetable[PML5Entry] |= 0x3;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
SerialPrintf("PML4 available - using that instead.\r\n");
|
|
||||||
size_t MemorySearchDepth = MemorySize;
|
|
||||||
|
|
||||||
if(MemorySearchDepth > (1ULL << 48))
|
|
||||||
SerialPrintf("RAM limited to 256TB.\r\n");
|
|
||||||
|
|
||||||
size_t MaxPML4 = 1;
|
|
||||||
size_t MaxPDP = 512;
|
|
||||||
|
|
||||||
size_t LastPDPEntry = 512;
|
|
||||||
|
|
||||||
while(MemorySearchDepth > (512ULL << 30)) {
|
|
||||||
MaxPML4++;
|
|
||||||
MemorySearchDepth -= (512ULL << 30);
|
|
||||||
}
|
|
||||||
|
|
||||||
if(MaxPML4 > 512)
|
|
||||||
MaxPML4 = 512;
|
|
||||||
|
|
||||||
if(MemorySearchDepth) {
|
|
||||||
LastPDPEntry = ( (MemorySearchDepth + ((1 << 30) - 1)) & (~0ULL << 30)) >> 30;
|
|
||||||
|
|
||||||
if(LastPDPEntry > 512)
|
|
||||||
LastPDPEntry = 512;
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t PDPSize = PAGETABLE_SIZE * MaxPML4;
|
|
||||||
size_t PDPBase = AllocatePagetable(PDPSize);
|
|
||||||
|
|
||||||
for(size_t PML4Entry = 0; PML4Entry < MaxPML4; PML4Entry++) {
|
|
||||||
Pagetable[PML4Entry] = PDPBase + (PML4Entry << 12);
|
|
||||||
|
|
||||||
if(PML4Entry == (MaxPML4 - 1)) {
|
|
||||||
MaxPDP = LastPDPEntry;
|
|
||||||
}
|
|
||||||
|
|
||||||
for(size_t PDPEntry = 0; PDPEntry < MaxPDP; PDPEntry++) {
|
|
||||||
((size_t* ) Pagetable[PML4Entry])[PDPEntry] = (((PML4Entry << 9) + PDPEntry) << 30) | 0x83;
|
|
||||||
}
|
|
||||||
|
|
||||||
Pagetable[PML4Entry] |= 0x3;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
SerialPrintf("System does not support 1GB pages - using 2MiB paging instead.\r\n");
|
list_entry_t* StackEntry = StackFreeList.Next;
|
||||||
|
ListRemove(StackEntry);
|
||||||
size_t MemorySearchDepth = MemorySize;
|
memset(StackEntry, 0, StackSize);
|
||||||
|
StackAddress = (void*)StackEntry;
|
||||||
if(MemorySearchDepth > (1ULL << 48)) {
|
|
||||||
SerialPrintf("Usable RAM is limited to 256TB, and the page table alone will use 1GB of space in memory.\r\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t MaxPML4 = 1, MaxPDP = 512, MaxPD = 512, LastPDPEntry = 1;
|
|
||||||
|
|
||||||
while(MemorySearchDepth > (512ULL << 30)) {
|
|
||||||
MaxPML4++;
|
|
||||||
MemorySearchDepth -= (512ULL << 30);
|
|
||||||
}
|
|
||||||
|
|
||||||
if(MaxPML4 > 512)
|
|
||||||
MaxPML4 = 512;
|
|
||||||
|
|
||||||
if(MemorySearchDepth) {
|
|
||||||
LastPDPEntry = ((MemorySearchDepth + ((1 << 30) - 1)) & (~0ULL << 30)) >> 30;
|
|
||||||
|
|
||||||
if(LastPDPEntry > 512)
|
|
||||||
LastPDPEntry = 512;
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t PDPSize = PAGETABLE_SIZE * MaxPML4;
|
|
||||||
size_t PDSize = PDPSize * MaxPDP;
|
|
||||||
|
|
||||||
size_t PDPBase = AllocatePagetable(PDPSize + PDSize);
|
|
||||||
size_t PDBase = PDPBase + PDSize;
|
|
||||||
|
|
||||||
for(size_t PML4Entry = 0; PML4Entry < MaxPML4; PML4Entry++) {
|
|
||||||
Pagetable[PML4Entry] = PDBase + (PML4Entry << 12);
|
|
||||||
|
|
||||||
if(PML4Entry == (MaxPML4 - 1)) {
|
|
||||||
MaxPDP = LastPDPEntry;
|
|
||||||
}
|
|
||||||
|
|
||||||
for(size_t PDPEntry = 0; PDPEntry < MaxPDP; PDPEntry++) {
|
|
||||||
( (size_t* ) Pagetable[PML4Entry])[PDPEntry] = PDBase + (((PML4Entry << 9) + PDPEntry) << 12);
|
|
||||||
|
|
||||||
for(size_t PDEntry = 0; PDEntry < MaxPD; PDEntry++) {
|
|
||||||
( (size_t* ) ((size_t*) Pagetable[PML4Entry])[PDPEntry])[PDEntry] = (( (PML4Entry << 18) + (PDPEntry << 9) + PDPEntry) << 21) | 0x83;
|
|
||||||
}
|
|
||||||
|
|
||||||
( (size_t* ) Pagetable[PML4Entry])[PDPEntry] |= 0x3;
|
|
||||||
}
|
|
||||||
|
|
||||||
Pagetable[PML4Entry] |= 0x3;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
WriteControlRegister(3, Pagetable);
|
TicketUnlock(&StackLock);
|
||||||
|
|
||||||
registerTemp = ReadControlRegister(4);
|
StackAddress = (void*)((size_t)StackAddress + StackSize);
|
||||||
if(!(registerTemp & (1 << 7))) {
|
StackAddress = (void*)((size_t)StackAddress - sizeof(size_t) * 2);
|
||||||
TOGGLE_PGEBIT(registerTemp);
|
|
||||||
WriteControlRegister(4, registerTemp);
|
return StackAddress;
|
||||||
}*/
|
}
|
||||||
|
|
||||||
|
void FreeKernelStack(void* StackAddress) {
|
||||||
|
TicketAttemptLock(&StackLock);
|
||||||
|
list_entry_t* ListEntry = (list_entry_t*)(((size_t)(StackAddress) + (sizeof(size_t) * 2)) - (PAGE_SIZE * 4));
|
||||||
|
ListAdd(&StackFreeList, ListEntry);
|
||||||
|
TicketUnlock(&StackLock);
|
||||||
|
}
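Putting the two stack routines together, the intended lifecycle looks like this (sketch; the consumer is hypothetical):

    // A new task gets a 4-page stack; the returned pointer sits just below the
    // top of that stack, ready to be loaded as the task's stack pointer.
    void* TaskStack = AllocateKernelStack();

    // ... the task runs, then exits ...

    // Returning it pushes the whole 4-page block onto StackFreeList for reuse.
    FreeKernelStack(TaskStack);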
|
||||||
|
|
|
@ -1,5 +1,6 @@
|
||||||
#include <kernel/chroma.h>
|
#include <kernel/chroma.h>
|
||||||
#include <kernel/system/heap.h>
|
#include <kernel/system/heap.h>
|
||||||
|
#include <lainlib/lainlib.h>
|
||||||
|
|
||||||
/************************
|
/************************
|
||||||
*** Team Kitty, 2020 ***
|
*** Team Kitty, 2020 ***
|
||||||
|
@ -22,10 +23,144 @@
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
|
||||||
|
#define MIN_ORDER 3
|
||||||
|
#define PEEK(type, address) (*((volatile type*)(address)))
|
||||||
|
|
||||||
uint8_t* Memory = ((uint8_t*)(&end));
|
uint8_t* Memory = ((uint8_t*)(&end));
|
||||||
uint8_t* MemoryStart;
|
uint8_t* MemoryStart;
|
||||||
size_t MemoryBuckets;
|
size_t MemoryBuckets;
|
||||||
|
|
||||||
|
static buddy_t LowBuddy = {
|
||||||
|
.MaxOrder = 32,
|
||||||
|
.Base = (directptr_t) DIRECT_REGION,
|
||||||
|
.List = (directptr_t[32 - MIN_ORDER]) {0},
|
||||||
|
.Lock = {0},
|
||||||
|
};
|
||||||
|
|
||||||
|
static buddy_t HighBuddy = {
|
||||||
|
.MaxOrder = 64,
|
||||||
|
.Base = 0,
|
||||||
|
.List = (directptr_t[64 - MIN_ORDER]) {0},
|
||||||
|
.Lock = {0},
|
||||||
|
};
|
||||||
|
|
||||||
|
static size_t MemoryLength;
|
||||||
|
|
||||||
|
static bool CheckBuddies(buddy_t* Buddy, directptr_t InputA, directptr_t InputB, size_t Size) {
|
||||||
|
size_t LowerBuddy = MIN(CAST(size_t, InputA), CAST(size_t, InputB)) - (size_t) Buddy->Base;
|
||||||
|
size_t HigherBuddy = MAX(CAST(size_t, InputA), CAST(size_t, InputB)) - (size_t) Buddy->Base;
|
||||||
|
|
||||||
|
return (LowerBuddy ^ Size) == HigherBuddy;
|
||||||
|
}
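The XOR test above works because two buddies of size S differ only in the single address bit worth S. A tiny worked example (offsets are relative to Buddy->Base and purely illustrative):

    // Blocks at offsets 0x2000 and 0x3000 are buddies at size 0x1000,
    // because 0x2000 ^ 0x1000 == 0x3000.
    // Blocks at offsets 0x3000 and 0x4000 are NOT buddies at that size,
    // because 0x3000 ^ 0x1000 == 0x2000, not 0x4000.
    _Static_assert((0x2000 ^ 0x1000) == 0x3000, "buddy pair");
    _Static_assert((0x3000 ^ 0x1000) != 0x4000, "not a buddy pair");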
|
||||||
|
|
||||||
|
static void AddToBuddyList(buddy_t* Buddy, directptr_t Address, size_t Order, bool NewEntry) {
|
||||||
|
directptr_t ListHead = Buddy->List[Order - MIN_ORDER];
|
||||||
|
|
||||||
|
//SerialPrintf("Adding new entry to buddy: Address 0x%p with order %d, New Entry is %s\r\n", Address, Order, NewEntry ? "true" : "false");
|
||||||
|
|
||||||
|
/*
|
||||||
|
SerialPrintf("About to poke memory..\r\n");
|
||||||
|
PEEK(directptr_t, Address) = 0;
|
||||||
|
SerialPrintf("Did it work?\r\n");
|
||||||
|
*/
|
||||||
|
|
||||||
|
size_t Size = 1ull << Order;
|
||||||
|
|
||||||
|
TicketAttemptLock(&Buddy->Lock);
|
||||||
|
|
||||||
|
//SerialPrintf("Ticketlock engaged\r\n");
|
||||||
|
|
||||||
|
if(!NewEntry && ListHead != 0) {
|
||||||
|
directptr_t ListPrevious = 0;
|
||||||
|
|
||||||
|
while(true) {
|
||||||
|
if(CheckBuddies(Buddy, ListHead, Address, Size)) {
|
||||||
|
if(ListPrevious != 0) {
|
||||||
|
PEEK(directptr_t, ListPrevious) = PEEK(directptr_t, ListHead);
|
||||||
|
} else
|
||||||
|
Buddy->List[Order - MIN_ORDER] = PEEK(directptr_t, ListHead);
|
||||||
|
|
||||||
|
AddToBuddyList(Buddy, MIN(ListHead, Address), Order + 1, false);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if(PEEK(directptr_t, ListHead) == 0) {
|
||||||
|
PEEK(directptr_t, ListHead) = Address;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
ListPrevious = ListHead;
|
||||||
|
ListHead = PEEK(directptr_t, ListHead);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
//SerialPrintf("\tAbout to poke memory 0x%p - current value is 0x%x\r\n", Address, *((size_t*)(Address)));
|
||||||
|
*((size_t*)(Address)) = (size_t) ListHead;
|
||||||
|
Buddy->List[Order - MIN_ORDER] = Address;
|
||||||
|
}
|
||||||
|
|
||||||
|
TicketUnlock(&Buddy->Lock);
|
||||||
|
|
||||||
|
//SerialPrintf("Ticketlock Released.\r\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
static void AddRangeToBuddy(buddy_t* Buddy, directptr_t Base, size_t Size) {
|
||||||
|
//SerialPrintf("Starting a new range addition.\r\n\t");
|
||||||
|
while(Size > (1ull << MIN_ORDER)) {
|
||||||
|
//SerialPrintf("New iteration. Current Size: 0x%x\r\n\t", Size);
|
||||||
|
for(int Order = Buddy->MaxOrder - 1; Order >= MIN_ORDER; Order--) {
|
||||||
|
//SerialPrintf("New Loop. Current Order: %d\r\n\t", Order);
|
||||||
|
if(Size >= (1ull << Order)) {
|
||||||
|
//SerialPrintf("\tNew loop check passed.\r\n\t");
|
||||||
|
AddToBuddyList(Buddy, Base, Order, true);
|
||||||
|
//SerialPrintf("\tEntry added to current buddy. Moving onto memory operations..\r\n\t");
|
||||||
|
Base = (void*)((((char*)Base) + (1ull << Order)));
|
||||||
|
Size -= 1ull << Order;
|
||||||
|
//SerialPrintf("\tMemory operations complete. Moving onto next iteration.\r\n");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static directptr_t BuddyAllocate(buddy_t* Buddy, size_t Size) {
|
||||||
|
int InitialOrder = MAX((64 - CLZ(Size - 1)), MIN_ORDER);
|
||||||
|
|
||||||
|
size_t WantedSize = 1ull << InitialOrder;
|
||||||
|
|
||||||
|
if(InitialOrder >= Buddy->MaxOrder) {
|
||||||
|
SerialPrintf("Tried to allocate too much physical memory for buddy 0x%p\r\n", Buddy);
|
||||||
|
SerialPrintf("Buddy 0x%p has max order %d, but 0x%x bytes was requested.\r\nInitial Order: %d, Wanted Size: 0x%x\r\n", Buddy, Buddy->MaxOrder, Size, InitialOrder, WantedSize);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
TicketAttemptLock(&Buddy->Lock);
|
||||||
|
|
||||||
|
SerialPrintf("Searching for a valid order to allocate into. Condition: {\r\n\tOrder: %d,\r\n\tSize: 0x%x\r\n}\r\n\n", InitialOrder, WantedSize);
|
||||||
|
|
||||||
|
for(int Order = InitialOrder; Order < Buddy->MaxOrder; Order++) {
|
||||||
|
SerialPrintf("\tCurrent Order: %d, Buddy entry: %x\r\n", Order, Buddy->List[Order - MIN_ORDER]);
|
||||||
|
if(Buddy->List[Order - MIN_ORDER] != 0) {
|
||||||
|
SerialPrintf("\t\tFound a valid Order!\r\n");
|
||||||
|
directptr_t Address = Buddy->List[Order - MIN_ORDER];
|
||||||
|
Buddy->List[Order - MIN_ORDER] = PEEK(directptr_t, Address);
|
||||||
|
TicketUnlock(&Buddy->Lock);
|
||||||
|
|
||||||
|
size_t FoundSize = 1ull << Order;
|
||||||
|
|
||||||
|
SerialPrintf("\t\tAdding area - Address 0x%p, Size 0x%x\r\n\n", Address, FoundSize);
|
||||||
|
|
||||||
|
AddRangeToBuddy(Buddy, (void*)((size_t)Address + WantedSize), FoundSize - WantedSize);
|
||||||
|
|
||||||
|
SerialPrintf("\t\tArea added!\r\n\n");
|
||||||
|
return Address;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
SerialPrintf("BuddyAllocate: Unable to find a valid order to allocate!\r\nInitial Order: %d, WantedSize: 0x%x\r\n\r\n", InitialOrder, WantedSize);
|
||||||
|
|
||||||
|
TicketUnlock(&Buddy->Lock);
|
||||||
|
return NULL;
|
||||||
|
}
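For reference, the order selection at the top of BuddyAllocate rounds a request up to the next power of two (worked example; CLZ is the 64-bit count-leading-zeros wrapper used in this file):

    // Size = 4096 -> CLZ(4095) = 52 -> InitialOrder = 64 - 52 = 12 -> 1 << 12 = 4096 bytes
    // Size = 5000 -> CLZ(4999) = 51 -> InitialOrder = 64 - 51 = 13 -> 1 << 13 = 8192 bytes
    // Size = 8    -> CLZ(7)    = 61 -> InitialOrder = max(3, MIN_ORDER) = 3 -> 8 bytes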
|
||||||
|
|
||||||
void InitMemoryManager() {
|
void InitMemoryManager() {
|
||||||
|
|
||||||
|
@ -77,7 +212,7 @@ void ListMemoryMap() {
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
for(MMapEnt* MapEntry = &bootldr.mmap; (size_t)MapEntry < (size_t)&environment; MapEntry++) {
|
for(MMapEnt* MapEntry = &bootldr.mmap; (size_t)MapEntry < (size_t)&bootldr + bootldr.size; MapEntry++) {
|
||||||
char EntryType[8] = {0};
|
char EntryType[8] = {0};
|
||||||
switch(MMapEnt_Type(MapEntry)) {
|
switch(MMapEnt_Type(MapEntry)) {
|
||||||
case MMAP_FREE:
|
case MMAP_FREE:
|
||||||
|
@ -101,60 +236,99 @@ void ListMemoryMap() {
|
||||||
if(entry_from != 0 && entry_to != 0)
|
if(entry_from != 0 && entry_to != 0)
|
||||||
SerialPrintf("[ mem 0x%p-0x%p] %s\r\n", entry_from, entry_to, EntryType);
|
SerialPrintf("[ mem 0x%p-0x%p] %s\r\n", entry_from, entry_to, EntryType);
|
||||||
|
|
||||||
|
if(MMapEnt_Type(MapEntry) == MMAP_FREE) {
|
||||||
|
SerialPrintf("\tAdding this entry to the physical memory manager!\r\n");
|
||||||
|
AddRangeToPhysMem((void*)((char*)(MMapEnt_Ptr(MapEntry) /* + DIRECT_REGION*/ )), MMapEnt_Size(MapEntry));
|
||||||
|
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t AllocateFrame() {
|
void AddRangeToPhysMem(directptr_t Base, size_t Size) {
|
||||||
size_t FreePage = SeekFrame();
|
if(Base < (void*)(LOWER_REGION + DIRECT_REGION)) {
|
||||||
SET_BIT(FreePage);
|
SerialPrintf("New range in lower memory: 0x%p, size 0x%x\r\n", Base, Size);
|
||||||
return FreePage;
|
AddRangeToBuddy(&LowBuddy, Base, Size);
|
||||||
}
|
} else {
|
||||||
|
if(HighBuddy.Base == NULL) {
|
||||||
|
HighBuddy.Base = Base;
|
||||||
|
}
|
||||||
|
|
||||||
void FreeFrame(size_t Frame) {
|
AddRangeToBuddy(&HighBuddy, Base, Size);
|
||||||
UNSET_BIT(Frame);
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t SeekFrame() {
|
|
||||||
for(size_t i = 0; i < MemoryPages; i++) {
|
|
||||||
if(!READ_BIT(i))
|
|
||||||
return i;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
SerialPrintf("Memory manager: Critical!\r\n");
|
if(MemoryLength < AlignUpwards((size_t)Base + Size, PAGE_SIZE) / PAGE_SIZE) {
|
||||||
return (size_t) -1;
|
MemoryLength = AlignUpwards((size_t)Base + Size, PAGE_SIZE) / PAGE_SIZE;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void MemoryTest() {
|
directptr_t PhysAllocateLowMem(size_t Size) {
|
||||||
SerialPrintf("Initializing basic memory test..\r\n");
|
directptr_t Pointer = BuddyAllocate(&LowBuddy, Size);
|
||||||
bool Passed = true;
|
ASSERT(Pointer != NULL, "PhysAllocateLowMem: Allocation failed!");
|
||||||
size_t FirstPage = SeekFrame();
|
|
||||||
/*(void* FirstPageAlloc = (void*)*/ AllocateFrame();
|
|
||||||
size_t SecondPage = SeekFrame();
|
|
||||||
/*void* SecondPageAlloc = (void*)*/ AllocateFrame();
|
|
||||||
|
|
||||||
if(!(FirstPage == 0 && SecondPage == 1)) {
|
return Pointer;
|
||||||
Passed = false;
|
}
|
||||||
SerialPrintf("First iteration: Failed, First page %x, Second page %x.\r\n", FirstPage, SecondPage);
|
|
||||||
}
|
directptr_t PhysAllocateMem(size_t Size) {
|
||||||
|
directptr_t Pointer = NULL;
|
||||||
|
|
||||||
|
if(HighBuddy.Base == 0)
|
||||||
|
Pointer = BuddyAllocate(&HighBuddy, Size);
|
||||||
|
|
||||||
|
if(Pointer == NULL)
|
||||||
|
Pointer = BuddyAllocate(&LowBuddy, Size);
|
||||||
|
|
||||||
FreeFrame(SecondPage);
|
ASSERT(Pointer != NULL, "PhysAllocateMem: Unable to allocate memory!");
|
||||||
SecondPage = SeekFrame();
|
|
||||||
|
|
||||||
if(SecondPage != 1)
|
return Pointer;
|
||||||
Passed = false;
|
}
|
||||||
|
|
||||||
FreeFrame(FirstPage);
|
directptr_t PhysAllocateZeroMem(size_t Size) {
|
||||||
FirstPage = SeekFrame();
|
directptr_t Pointer = PhysAllocateMem(Size);
|
||||||
|
memset(Pointer, 0, Size);
|
||||||
|
return Pointer;
|
||||||
|
}
|
||||||
|
|
||||||
if(FirstPage != 0)
|
directptr_t PhysAllocateLowZeroMem(size_t Size) {
|
||||||
Passed = false;
|
directptr_t Pointer = PhysAllocateLowMem(Size);
|
||||||
|
memset(Pointer, 0, Size);
|
||||||
|
return Pointer;
|
||||||
|
|
||||||
if(Passed)
|
}
|
||||||
SerialPrintf("Memory test passed.\r\n");
|
|
||||||
else {
|
void PhysFreeMem(directptr_t Pointer, size_t Size) {
|
||||||
SerialPrintf("Memory test failed.\r\n");
|
ASSERT(Pointer >= (directptr_t) DIRECT_REGION, "PhysFreeMem: Attempting to free memory not in the direct mapping region.");
|
||||||
SerialPrintf("First page %x, Second page %x.\r\n", FirstPage, SecondPage);
|
|
||||||
|
buddy_t* Buddy;
|
||||||
|
|
||||||
|
if(Pointer < (void*)(LOWER_REGION + DIRECT_REGION))
|
||||||
|
Buddy = &LowBuddy;
|
||||||
|
else
|
||||||
|
Buddy = &HighBuddy;
|
||||||
|
|
||||||
|
int Order = MAX(64 - CLZ(Size - 1), MIN_ORDER);
|
||||||
|
AddToBuddyList(Buddy, Pointer, Order, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
static _Atomic(uint16_t)* PageRefCount = NULL;
|
||||||
|
|
||||||
|
void PhysAllocatorInit() {
|
||||||
|
PageRefCount = PhysAllocateZeroMem(sizeof(uint16_t) * MemoryPages);
|
||||||
|
}
|
||||||
|
|
||||||
|
directptr_t PhysAllocatePage() {
|
||||||
|
directptr_t Page = PhysAllocateMem(PAGE_SIZE);
|
||||||
|
PhysRefPage(Page);
|
||||||
|
return Page;
|
||||||
|
}
|
||||||
|
|
||||||
|
void PhysRefPage(directptr_t Page) {
|
||||||
|
PageRefCount[(size_t) Page >> PAGE_SHIFT]++;
|
||||||
|
}
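These reference-counting helpers (PhysAllocatePage above, PhysRefPage, and PhysFreePage just below) compose like this (sketch; the second owner is hypothetical):

    directptr_t Shared = PhysAllocatePage(); // refcount becomes 1
    PhysRefPage(Shared);                     // a second owner takes a reference -> 2
    PhysFreePage(Shared);                    // first owner drops it -> 1, page stays allocated
    PhysFreePage(Shared);                    // last owner drops it -> 0, page returns to the buddy allocator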
|
||||||
|
|
||||||
|
void PhysFreePage(directptr_t Page) {
|
||||||
|
if(--PageRefCount[(size_t)Page >> PAGE_SHIFT] == 0) {
|
||||||
|
PhysFreeMem(Page, PAGE_SIZE);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|