#include <kernel/chroma.h>
#include <lainlib/lainlib.h>

/************************
 *** Team Kitty, 2020 ***
 ***     Chroma       ***
 ************************/

/****************************************
 *   W O R K   I N   P R O G R E S S    *
 ****************************************
 *
 * This file contains functions for virtual memory management.
 *
 * Virtual Memory Management is still a work in progress.
 * The functions here are hold-offs from old versions of the software
 * implemented here, as well as from the EFI version of Chroma, called Sync.
 *
 * There, these functions worked, but here, under BIOS, it's a lot more
 * difficult. It will take some time to get these functions working.
 *
 * The general plan, since the BOOTBOOT loader has given us static addresses
 * for all of our doodads, is to keep the core kernel where it is
 * (FFFFFFFFFFE00000) and load in modules and libraries around it.
 *
 * We start in the higher half, so we'll dedicate the lower half
 * (0x7FFFFFFFFFFF and below) to userspace.
 *
 * That means we have about 3 terabytes of RAM for the kernel.
 * This will be identity mapped, always.
 *
 * Handily, since modern processors require the top 16 bits of a virtual
 * address to be a sign-extension of bit 47 (the "canonical" form), and the
 * kernel is mapped to 0xFFFF800000000000 and above, we can use the nomenclature:
 *  * 0x00007FFFFFFFFFFF and below is user space.
 *  * 0xFFFF800000000000 and above is kernel space.
 * The first four hex digits tell you at a glance which half you're in, which
 * provides a great deal of readability for the future of the kernel.
 *
 * We'll have a kernel heap mapped into this kernel space, as well as a kernel
 * stack (for task switching and error tracing).
 * These will be 1GB each.
 * We may have to increase this in the future, once Helix is fully integrated.
 * Helix will take a lot of memory, as it is a fully featured 3D engine. We may
 * have to implement things like texture streaming and mipmapping. Minimising
 * RAM usage is NOT a priority for me, but it would be nice to have a minimum
 * requirement above 32GB.
 *
 * //TODO: Expand Kernel Heap
 *
 * //TODO: There are lots of calls to AllocateFrame here; those need to be
 * //      separated out into AllocateZeroFrame where necessary.
 *
 */
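
/*
 * A minimal sketch of the split described above (illustrative only; nothing
 * below relies on these helpers): an address belongs to one half or the
 * other purely by its leading bits.
 */
static inline bool IsUserspaceAddress(size_t Address) {
    return Address <= 0x00007FFFFFFFFFFFull;
}

static inline bool IsKernelspaceAddress(size_t Address) {
    return Address >= 0xFFFF800000000000ull;
}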

//extern size_t _kernel_text_start;
extern size_t _kernel_rodata_start;
extern size_t _kernel_data_start;

size_t KernelLocation;

//__attribute__((aligned(4096))) static size_t Pagetable[512] = {0};

#define LAST_ENTRY 0xFF8

#define SET_ADDRESS(a,b) ((*(size_t*) (a)) = (size_t) b)
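
/*
 * Illustrative usage (nothing in this file calls it yet): LAST_ENTRY is the
 * byte offset of the final 8-byte entry in a 4KiB table, so
 *   SET_ADDRESS((char*) Pagetable + LAST_ENTRY, Target);
 * would point the last entry of Pagetable at Target.
 */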

/*
 * It turns out it's useful to have macros for the standard
 * data size units.
 *
 * Who would've thought?
 */

#define KiB (1 * 1024)
#define MiB (1 * 1024 * KiB)

#define PAGE_PRESENT 1
#define PAGE_RW      2
#define PAGE_USER    4
#define PAGE_GLOBAL  8

// 0x83 sets PAGE_PRESENT | PAGE_RW plus the large-page bit (bit 7).
#define USERWRITEABLE_FLAGS(a) ((a & 0xFFFFFF00) + 0x83)

// The AbstractAllocator control struct.
static allocator_t Allocator = NULL;
// The AbstractAllocator Ticketlock.
static ticketlock_t AllocatorLock = {0};

// Entries to help allocate the Kernel Stack.
static list_entry_t StackFreeList;
static ticketlock_t StackLock = {0};
static void* StackPointer = (void*) KERNEL_STACK_REGION;

// A temporary itoa function for better debugging..
const char* IntToAscii(int In) {
    // Static buffer so the returned pointer stays valid after we return;
    // each call overwrites the previous result. Handles non-negative
    // values only.
    static char OutputBuffer[32];

    size_t Temp, i = 0, j = 0;

    do {
        Temp = In % 10;
        OutputBuffer[i++] = (Temp < 10) ? (Temp + '0') : (Temp + 'a' - 10);
    } while (In /= 10);

    // Terminate, then reverse in place: the digits were generated
    // least-significant first.
    OutputBuffer[i--] = 0;

    for (j = 0; j < i; j++, i--) {
        Temp = OutputBuffer[j];
        OutputBuffer[j] = OutputBuffer[i];
        OutputBuffer[i] = Temp;
    }

    return OutputBuffer;
}
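
/*
 * Illustrative usage: SerialPrintf(IntToAscii(1024));
 * The returned buffer is reused by the next call, so copy the text out
 * if it needs to persist.
 */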

void InitPaging() {
    StackFreeList = (list_entry_t) { &StackFreeList, &StackFreeList };

    size_t Size = AlignUpwards(AllocatorSize(), PAGE_SIZE);
    Allocator = PhysAllocateZeroMem(Size);
    Allocator = CreateAllocatorWithPool(Allocator, Size);

    SerialPrintf("[Mem] Everything preallocated for paging.\n");

    KernelAddressSpace = (address_space_t) {
        .Lock = {0},
        .PML4 = PhysAllocateZeroMem(PAGE_SIZE)
    };

    size_t* Pagetable = KernelAddressSpace.PML4;

    //SerialPrintf("[Mem] About to identity map the higher half.\n");

    // Identity map the higher half.
    // PhysAllocateZeroMem hands back a direct-region (virtual) pointer, but
    // the pagetable entry needs the physical address, so we strip the
    // DIRECT_REGION offset before setting the flags.
    for (int i = 256; i < 512; i++) {
        Pagetable[i] = (size_t) PhysAllocateZeroMem(PAGE_SIZE);
        Pagetable[i] = (size_t) (((char*) Pagetable[i]) - DIRECT_REGION);
        Pagetable[i] |= (PAGE_PRESENT | PAGE_RW);
        //SerialPrintf("%d", i - 256);
    }

    SerialPrintf("[Mem] Identity mapping higher half complete.\n");

    // The last entry of the BOOTBOOT memory map tells us how much physical
    // memory we need to cover.
    MMapEnt* TopEntry = (MMapEnt*) (((size_t) (&bootldr) + bootldr.size) - sizeof(MMapEnt));
    size_t LargestAddress = TopEntry->ptr + TopEntry->size;

    SerialPrintf("[Mem] About to map lower memory into the Direct Region. Highest address = 0x%p\n", AlignUpwards(LargestAddress, PAGE_SIZE));
    for (size_t Address = 0; Address < AlignUpwards(LargestAddress, PAGE_SIZE); Address += PAGE_SIZE) {
        MapVirtualMemory(&KernelAddressSpace, (size_t*) (((char*) Address) + DIRECT_REGION), Address, MAP_WRITE);
    }

    SerialPrintf("[Mem] Lower half mapping complete.\n");

    SerialPrintf("[Mem] Mapping kernel into new memory map.\r\n");

    //TODO: Disallow execution of rodata and data, and bootldr/environment
    for (void* Address = CAST(void*, KERNEL_REGION);
            Address < CAST(void*, KERNEL_REGION + (KernelEnd - KernelAddr));
            Address = CAST(void*, CAST(char*, Address) + PAGE_SIZE)) {
        SerialPrintf("[mem] Mapping 0x%p to 0x%p, relative to kernel at 0x%p\r\n", (CAST(size_t, Address) - KERNEL_REGION) + KernelLocation, Address, (CAST(size_t, Address) - KERNEL_REGION));
        MapVirtualMemory(&KernelAddressSpace, Address, (CAST(size_t, Address) - KERNEL_REGION) + KernelLocation, MAP_EXEC);
    }

    /*for(void* Address = CAST(void*, KERNEL_REGION + 0x2000);
            Address < CAST(void*, KERNEL_REGION + 0x12000); // Higher half of kernel
            Address = CAST(void*, CAST(char*, Address) + PAGE_SIZE)) {
        MapVirtualMemory(&KernelAddressSpace, Address, (CAST(size_t, Address) - KERNEL_REGION) + KERNEL_PHYSICAL_2, MAP_EXEC);
    }*/

    SerialPrintf("[mem] Framebuffer at 0x%p, is 0x%p long. Mapping to 0x%p.\r\n", bootldr.fb_ptr, bootldr.fb_size, FB_REGION);
    for (void* Address = CAST(void*, FB_REGION);
            Address < CAST(void*, bootldr.fb_size + FB_REGION);
            Address = CAST(void*, CAST(char*, Address) + PAGE_SIZE)) {
        MapVirtualMemory(&KernelAddressSpace, Address, (CAST(size_t, Address) - FB_REGION) + FB_PHYSICAL, MAP_WRITE);
    }

    SerialPrintf("[Mem] Kernel mapped into pagetables. New PML4 at 0x%p\r\n", KernelAddressSpace.PML4);

    SerialPrintf("[Mem] About to move into our own pagetables.\r\n");
    WriteControlRegister(3, (size_t) KernelAddressSpace.PML4);
    SerialPrintf("[Mem] We survived!\r\n");

    //ASSERT(Allocator != NULL);
}
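
/*
 * Every physical/virtual conversion in this file follows one rule:
 * virtual = physical + DIRECT_REGION. A minimal sketch of helpers that make
 * that explicit (illustrative; the code above does the arithmetic inline):
 */
static inline size_t VirtToPhys(void* Virtual) {
    // Strip the direct-region offset to recover the physical address.
    return ((size_t) Virtual) - DIRECT_REGION;
}

static inline void* PhysToVirt(size_t Physical) {
    // Add the direct-region offset to get a usable kernel pointer.
    return (void*) (Physical + DIRECT_REGION);
}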

static size_t GetCachingAttribute(pagecache_t Cache) {
    switch (Cache) {
        case CACHE_WRITE_BACK: return 0;
        case CACHE_WRITE_THROUGH: return 1 << 2;
        case CACHE_NONE: return 1 << 3;
        case CACHE_WRITE_COMBINING: return 1 << 6;
    }
    // Default to uncached for anything unrecognised.
    return 1 << 3;
}

static bool ExpandAllocator(size_t NewSize) {
    size_t AllocSize = AlignUpwards(AllocatorPoolOverhead() + sizeof(size_t) * 5 + NewSize, PAGE_SIZE);
    void* Pool = PhysAllocateMem(AllocSize);
    return AddPoolToAllocator(Allocator, Pool, AllocSize) != NULL;
}
2020-08-31 20:47:52 +00:00
static void GetPageFromTables ( address_space_t * AddressSpace , size_t VirtualAddress , size_t * * Page ) {
2020-04-11 21:59:39 +00:00
2020-08-31 20:47:52 +00:00
//ASSERT(Page != NULL);
//ASSERT(AddressSpace != NULL);
2020-04-11 21:59:39 +00:00
2020-08-31 20:47:52 +00:00
size_t * Pagetable = AddressSpace - > PML4 ;
for ( int Level = 4 ; Level > 1 ; Level - - ) {
size_t * Entry = & Pagetable [ ( VirtualAddress > > ( 12u + 9u * ( Level - 1 ) ) ) & 0x1FFU ] ;
2020-04-11 21:59:39 +00:00
2020-08-31 20:47:52 +00:00
ASSERT ( * Entry & PAGE_PRESENT , " Page not present during retrieval " ) ;
2020-04-11 21:59:39 +00:00
2020-08-31 20:47:52 +00:00
Pagetable = ( size_t * ) ( ( char * ) ( * Entry & 0x7ffffffffffff000ull ) + DIRECT_REGION ) ;
2020-04-11 21:59:39 +00:00
}
2020-08-31 20:47:52 +00:00
ASSERT ( Pagetable [ ( VirtualAddress > > 12U ) & 0x1FFU ] & PAGE_PRESENT , " PDPE not present during retrieval " ) ;
* Page = & Pagetable [ ( VirtualAddress > > 12U ) & 0x1FFU ] ;
2020-04-11 21:59:39 +00:00
}
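
/*
 * Worked example (illustrative): for the kernel's own load address,
 * VirtualAddress = 0xFFFFFFFFFFE00000, the walk above indexes:
 *   Level 4 (PML4): (V >> 39) & 0x1FF = 511
 *   Level 3 (PDPT): (V >> 30) & 0x1FF = 511
 *   Level 2 (PD):   (V >> 21) & 0x1FF = 511
 *   Final   (PT):   (V >> 12) & 0x1FF = 0
 */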

void SetAddressSpace(address_space_t* AddressSpace) {
    //ASSERT(AddressSpace != NULL);

    // Compare the PML4 pointer itself (not the address of the struct
    // member) against the currently loaded CR3.
    if ((size_t) ((char*) ReadControlRegister(3) + DIRECT_REGION) != (size_t) AddressSpace->PML4) {
        WriteControlRegister(3, CAST(size_t, AddressSpace->PML4));
    }
}

void MapVirtualMemory(address_space_t* AddressSpace, void* VirtualAddress, size_t PhysicalAddress, mapflags_t Flag) {

    //bool MapGlobally = false;
    size_t Virtual = (size_t) VirtualAddress;

    //ASSERT(AddressSpace != NULL);
    TicketAttemptLock(&AddressSpace->Lock);

    size_t Flags = PAGE_PRESENT;

    // Translate the requested mapping flags into hardware page flags.
    if (Flag & MAP_WRITE)
        Flags |= PAGE_RW;
    if (Virtual < USER_REGION)
        Flags |= PAGE_USER;
    //TODO: Global mapping

    // Walk the tables, allocating any intermediate level that is missing.
    size_t* Pagetable = AddressSpace->PML4;
    for (int Level = 4; Level > 1; Level--) {
        size_t* Entry = &Pagetable[(Virtual >> (12u + 9u * (Level - 1))) & 0x1FFu];

        if (!(*Entry & PAGE_PRESENT)) {
            directptr_t Pointer = PhysAllocateZeroMem(PAGE_SIZE);
            *Entry = (size_t) (((char*) Pointer) - DIRECT_REGION);
        }

        *Entry |= Flags;

        Pagetable = (size_t*) (((char*) (*Entry & 0x7ffffffffffff000ull)) + DIRECT_REGION);
    }

    size_t* Entry = &Pagetable[(Virtual >> 12u) & 0x1FFu];
    *Entry = Flags | PhysicalAddress;

    if (AddressSpace != NULL) {
        TicketUnlock(&AddressSpace->Lock);
    }
}
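
/*
 * Illustrative usage: map one page of MMIO at physical 0xB8000 into the
 * direct region, writable (assumes the direct-mapping convention above):
 *   MapVirtualMemory(&KernelAddressSpace,
 *                    (void*) (DIRECT_REGION + 0xB8000), 0xB8000, MAP_WRITE);
 */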

void UnmapVirtualMemory(address_space_t* AddressSpace, void* VirtualAddress) {
    //ASSERT(AddressSpace != NULL);

    TicketAttemptLock(&AddressSpace->Lock);

    size_t* Entry;
    GetPageFromTables(AddressSpace, (size_t) VirtualAddress, &Entry);

    // Clear the entry, then flush the TLB for this page.
    *Entry = 0;
    InvalidatePage((size_t) VirtualAddress);

    if (AddressSpace != NULL) {
        TicketUnlock(&AddressSpace->Lock);
    }
}

void CacheVirtualMemory(address_space_t* AddressSpace, void* VirtualAddress, pagecache_t Cache) {

    //ASSERT(AddressSpace != NULL);

    TicketAttemptLock(&AddressSpace->Lock);

    size_t* Entry;

    GetPageFromTables(AddressSpace, (size_t) VirtualAddress, &Entry);

    // Clear the existing caching bits, then apply the requested mode.
    *Entry &= ~((1 << 6) | (1 << 2) | (1 << 3));
    *Entry |= GetCachingAttribute(Cache);

    InvalidatePage((size_t) VirtualAddress);

    if (AddressSpace != NULL) {
        TicketUnlock(&AddressSpace->Lock);
    }
}
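
/*
 * Illustrative usage: framebuffers are a typical candidate for
 * write-combining, assuming FB_REGION has already been mapped:
 *   CacheVirtualMemory(&KernelAddressSpace, (void*) FB_REGION, CACHE_WRITE_COMBINING);
 */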

void* AllocateMemory(size_t Bits) {
    TicketAttemptLock(&AllocatorLock);

    void* Result = AllocatorMalloc(Allocator, Bits);

    // If the pool ran dry, grow it once and retry.
    if (Result == NULL) {
        if (!ExpandAllocator(Bits)) {
            TicketUnlock(&AllocatorLock);
            return NULL;
        }

        Result = AllocatorMalloc(Allocator, Bits);
    }

    // Every allocation is handed out zeroed.
    if (Result != NULL) {
        memset(Result, 0, Bits);
    }

    TicketUnlock(&AllocatorLock);
    return Result;
}

void* ReallocateMemory(void* Address, size_t NewSize) {
    TicketAttemptLock(&AllocatorLock);
    void* Result = AllocatorRealloc(Allocator, Address, NewSize);

    if (Result == NULL) {
        if (!ExpandAllocator(NewSize)) {
            TicketUnlock(&AllocatorLock);
            return NULL;
        }

        Result = AllocatorRealloc(Allocator, Address, NewSize);
    }

    TicketUnlock(&AllocatorLock);
    return Result;
}

void FreeMemory(void* Address) {
    TicketAttemptLock(&AllocatorLock);
    AllocatorFree(Allocator, Address);
    TicketUnlock(&AllocatorLock);
}
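
/*
 * Illustrative lifecycle (assumes the allocator has been initialised by
 * InitPaging):
 *   int* Scratch = AllocateMemory(64 * sizeof(int)); // arrives zeroed
 *   if (Scratch != NULL) {
 *       Scratch[0] = 42;
 *       FreeMemory(Scratch);
 *   }
 */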

void* AllocateKernelStack() {
    void* StackAddress = NULL;
    size_t StackSize = PAGE_SIZE * 4;

    TicketAttemptLock(&StackLock);
    if (ListIsEmpty(&StackFreeList)) {
        // No recycled stacks available; carve a new one out of the stack
        // region, leaving a 4KiB unmapped gap below it as a guard.
        StackAddress = StackPointer;
        StackPointer = (void*) (((char*) StackPointer) + (4 * KiB) + StackSize);

        for (size_t i = 0; i < (StackSize / PAGE_SIZE); i++) {
            directptr_t NewStack;
            NewStack = PhysAllocateZeroMem(PAGE_SIZE);
            MapVirtualMemory(&KernelAddressSpace, (void*) ((size_t) StackAddress + i * PAGE_SIZE), (size_t) ((char*) NewStack) - DIRECT_REGION, MAP_WRITE);
        }
    } else {
        // Reuse a stack off the free list, zeroing it first.
        list_entry_t* StackEntry = StackFreeList.Next;
        ListRemove(StackEntry);
        memset(StackEntry, 0, StackSize);
        StackAddress = (void*) StackEntry;
    }

    TicketUnlock(&StackLock);

    // Hand back the top of the stack, minus two slots of scratch space.
    StackAddress = (void*) ((size_t) StackAddress + StackSize);
    StackAddress = (void*) ((size_t) StackAddress - sizeof(size_t) * 2);

    return StackAddress;
}
void FreeKernelStack(void* StackAddress) {
    TicketAttemptLock(&StackLock);
    // Recover the base of the stack from the adjusted top pointer that
    // AllocateKernelStack handed out, and push it onto the free list.
    list_entry_t* ListEntry = (list_entry_t*) (((size_t) (StackAddress) + (sizeof(size_t) * 2)) - (PAGE_SIZE * 4));
    ListAdd(&StackFreeList, ListEntry);
    TicketUnlock(&StackLock);
}
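
/*
 * Illustrative usage: give a new task its own kernel stack, then recycle it
 * when the task dies. The returned pointer is the (adjusted) top of the
 * stack, which is exactly what FreeKernelStack expects back:
 *   void* Stack = AllocateKernelStack();
 *   ...
 *   FreeKernelStack(Stack);
 */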