Move the code cache into Environment, and out of knight/shared.

David Anderson 2015-02-23 23:12:45 -08:00
parent 57ba8fd09b
commit c70e87d582
22 changed files with 51 additions and 1570 deletions

View File

@ -1,33 +0,0 @@
#include <stdio.h>
#include "KeCommon.h"
using namespace Knight;
size_t Knight::KE_PFormat(char *buffer, size_t maxlength, const char *fmt, ...)
{
size_t len;
va_list ap;
va_start(ap, fmt);
len = KE_PFormatArgs(buffer, maxlength, fmt, ap);
va_end(ap);
return len;
}
size_t Knight::KE_PFormatArgs(char *buffer, size_t maxlength, const char *fmt, va_list ap)
{
size_t len;
len = vsnprintf(buffer, maxlength, fmt, ap);
if (len >= maxlength)
{
buffer[maxlength - 1] = '\0';
return (maxlength - 1);
}
else
{
return len;
}
}

View File

@ -1,36 +0,0 @@
#ifndef _INCLUDE_KNIGHT_KE_COMMON_UTILS_H_
#define _INCLUDE_KNIGHT_KE_COMMON_UTILS_H_
#include <stddef.h>
#include <stdarg.h>
namespace Knight
{
/**
* @brief Formats a buffer with C platform rules.
*
* Unlike platform snprintf, this will never return nonsense values like -1.
*
* @param buffer Buffer to store to.
* @param maxlength Maximum length of buffer (including null terminator).
* @param fmt printf() format string.
* @param ... Formatting arguments.
* @return Number of characters written.
*/
extern size_t KE_PFormat(char *buffer, size_t maxlength, const char *fmt, ...);
/**
* @brief Formats a buffer with C platform rules.
*
* Unlike platform snprintf, this will never return nonsense values like -1.
*
* @param buffer Buffer to store to.
* @param maxlength Maximum length of buffer (including null terminator).
* @param fmt printf() format string.
* @param args Formatting arguments.
* @return Number of characters written.
*/
extern size_t KE_PFormatArgs(char *buffer, size_t maxlength, const char *fmt, va_list args);
}
#endif //_INCLUDE_KNIGHT_KE_COMMON_UTILS_H_
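
For reference, a brief usage sketch of the formatter being removed here; the buffer and format string below are illustrative:

#include <KeCommon.h>

void FormatExample()
{
    char buf[64];
    size_t len = Knight::KE_PFormat(buf, sizeof(buf), "%s=%d", "answer", 42);
    /* len is the number of characters written; unlike raw snprintf it never
     * returns -1, and the buffer is always null-terminated. */
    (void)len;
}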

View File

@ -1,416 +0,0 @@
#include <string.h>
#include <KeHashTable.h>
using namespace Knight;
struct KeHashNode
{
KeHashNode *next;
uint32_t key_hash;
const void *key;
void *value;
};
namespace Knight
{
class KeHashTable
{
public:
KeHashNode **buckets;
uint32_t num_buckets;
uint32_t shift;
uint32_t num_entries;
KeHashGenerator key_gen;
KeHashMarshal key_m;
KeHashMarshal val_m;
ke_allocator_t *node_alloc;
size_t key_offs;
size_t val_offs;
size_t node_size;
uint32_t grow_limit;
KeHashNode *free_list;
bool keep_free_list;
};
}
void *ke_DefHashMalloc(ke_allocator_t *alloc, size_t amt)
{
return malloc(amt);
}
void ke_DefHashFree(ke_allocator_t *alloc, void *addr)
{
free(addr);
}
ke_allocator_t s_DefHashAllocator =
{
ke_DefHashMalloc,
ke_DefHashFree,
NULL
};
KeHashTable *Knight::KE_CreateHashTable(
uint32_t bits,
KeHashGenerator key_gen,
const KeHashMarshal *key_marshal,
const KeHashMarshal *val_marshal,
ke_allocator_t *nodeAlloc,
bool keep_free_list)
{
KeHashTable *table;
if (bits >= 27)
{
bits = 26;
}
else if (bits < 4)
{
bits = 4;
}
/* Validate marshals. */
if ((key_marshal->bytes != 0
&& key_marshal->ctor == NULL)
|| (val_marshal->bytes != 0
&& val_marshal->ctor == NULL))
{
return NULL;
}
table = new KeHashTable;
table->key_gen = key_gen;
table->key_m = *key_marshal;
table->val_m = *val_marshal;
table->num_entries = 0;
table->shift = 32 - bits;
table->node_alloc = nodeAlloc == NULL ? &s_DefHashAllocator : nodeAlloc;
table->num_buckets = (1 << bits);
table->grow_limit = (uint32_t)(0.9f * table->num_buckets);
table->keep_free_list = keep_free_list;
table->free_list = NULL;
table->buckets = (KeHashNode **)malloc(sizeof(KeHashNode *) * table->num_buckets);
memset(table->buckets, 0, sizeof(KeHashNode *) * table->num_buckets);
table->key_offs = sizeof(KeHashNode);
if (table->key_m.bytes != 0 && table->key_m.bytes % 8 != 0)
{
table->key_m.bytes += 8;
table->key_m.bytes -= (table->key_m.bytes % 8);
}
table->val_offs = table->key_offs + table->key_m.bytes;
table->node_size = table->val_offs + table->val_m.bytes;
return table;
}
#define KE_GET_BUCKET(tbl, hsh) (&(tbl)->buckets[((hsh) * 0x9E3779B9) >> (tbl)->shift])
KeHashNode **ke_HashInternalFind(KeHashTable *table, uint32_t key_hash, const void *key)
{
KeHashNode *node;
KeHashNode **bucket;
bucket = KE_GET_BUCKET(table, key_hash);
/* :TODO: move to the front once found? */
while ((node = *bucket) != NULL)
{
if (node->key_hash == key_hash
&& ((table->key_m.cmp != NULL && table->key_m.cmp(node->key, key))
|| node->key == key))
{
return bucket;
}
bucket = &node->next;
}
return bucket;
}
void ke_ResizeHashTable(KeHashTable *table, uint32_t new_shift)
{
uint32_t entries;
KeHashNode *next;
KeHashNode *node;
KeHashNode **rbucket;
KeHashNode **old_buckets;
uint32_t old_num_buckets;
/* Save old data */
old_num_buckets = table->num_buckets;
old_buckets = table->buckets;
entries = table->num_entries;
/* Save new data */
table->num_buckets = (1 << new_shift);
table->shift = 32 - new_shift;
table->grow_limit = (uint32_t)(0.9f * table->num_buckets);
table->buckets = (KeHashNode **)malloc(sizeof(KeHashNode *) * table->num_buckets);
memset(table->buckets, 0, sizeof(KeHashNode *) * table->num_buckets);
/* For each old bucket... */
for (uint32_t i = 0;
i < old_num_buckets && entries != 0;
i++)
{
node = old_buckets[i];
/* Get each item in its list... */
while (node != NULL)
{
next = node->next;
/* Find the new replacement bucket it needs to go in. */
rbucket = KE_GET_BUCKET(table, node->key_hash);
/* Link this node to the next node in the new bucket. */
if (*rbucket == NULL)
{
node->next = NULL;
}
else
{
node->next = *rbucket;
}
/* Add us to the front of that bucket's list. */
*rbucket = node;
node = next;
}
}
free(old_buckets);
}
void Knight::KE_AddToHashTable(KeHashTable *table, const void *key, void *val)
{
KeHashNode *node;
uint32_t key_hash;
KeHashNode **bucket;
key_hash = table->key_gen(key);
bucket = ke_HashInternalFind(table, key_hash, key);
if ((node = *bucket) != NULL)
{
/* Already in the table */
if ((table->val_m.cmp != NULL && table->val_m.cmp(node->value, val))
|| node->value == val)
{
return;
}
/* Destroy old value if it's set. */
if (node->value != NULL && table->val_m.dtor != NULL)
{
table->val_m.dtor(node->value);
}
/* Construct or set the new value. */
if (table->val_m.bytes != 0)
{
table->val_m.ctor(node->value, val);
}
else
{
node->value = val;
}
return;
}
/* If we're overloaded, we may need to resize.
* Right now, we do this if we hit a .9 entry:buckets ratio.
*/
if (table->num_entries >= table->grow_limit)
{
/* Double the bucket count (the resize function takes a bit count). */
ke_ResizeHashTable(table, (32 - table->shift) + 1);
bucket = ke_HashInternalFind(table, key_hash, key);
}
if (table->free_list != NULL)
{
node = table->free_list;
table->free_list = node->next;
}
else
{
node = (KeHashNode *)table->node_alloc->alloc(table->node_alloc, table->node_size);
}
if (table->key_m.bytes == 0)
{
node->key = key;
}
else
{
node->key = (char *)node + table->key_offs;
table->key_m.ctor((void *)node->key, key);
}
if (table->val_m.bytes == 0)
{
node->value = val;
}
else
{
node->value = (char *)node + table->val_offs;
table->val_m.ctor(node->value, val);
}
node->next = *bucket;
node->key_hash = key_hash;
*bucket = node;
table->num_entries++;
}
inline void ke_CleanUpHashNode(KeHashTable *table, KeHashNode *node)
{
/* Destroy old value if it's set. */
if (node->value != NULL && table->val_m.dtor != NULL)
{
table->val_m.dtor(node->value);
}
/* Destroy the key. */
if (table->key_m.dtor != NULL)
{
table->key_m.dtor(node->key);
}
/* Deallocate us as appropriate. */
if (table->keep_free_list)
{
node->next = table->free_list;
table->free_list = node;
}
else
{
table->node_alloc->dealloc(table->node_alloc, node);
}
}
void Knight::KE_RemoveFromHashTable(KeHashTable *table, const void *key)
{
KeHashNode *node;
uint32_t key_hash;
KeHashNode **bucket;
key_hash = table->key_gen(key);
bucket = ke_HashInternalFind(table, key_hash, key);
if ((node = *bucket) == NULL)
{
return;
}
/* Link the bucket to its next (removing us). */
*bucket = node->next;
table->num_entries--;
ke_CleanUpHashNode(table, node);
}
bool Knight::KE_FindInHashTable(KeHashTable *table, const void *key, void **value)
{
KeHashNode *node;
uint32_t key_hash;
KeHashNode **bucket;
key_hash = table->key_gen(key);
bucket = ke_HashInternalFind(table, key_hash, key);
if ((node = *bucket) == NULL)
{
return false;
}
if (value != NULL)
{
*value = node->value;
}
return true;
}
void Knight::KE_DestroyHashTable(KeHashTable *table)
{
KeHashNode *node, *next;
/* Turn off this caching! */
table->keep_free_list = false;
/* Find entries in buckets that need to be freed. */
for (uint32_t i = 0; i < table->num_buckets; i++)
{
node = table->buckets[i];
while (node != NULL)
{
next = node->next;
ke_CleanUpHashNode(table, node);
node = next;
}
}
/* Free the free list */
while (table->free_list != NULL)
{
next = table->free_list->next;
/* Dtors already ran when these nodes hit the free list; just release them. */
table->node_alloc->dealloc(table->node_alloc, table->free_list);
table->free_list = next;
}
/* Destroy everything now. */
free(table->buckets);
delete table;
}
void Knight::KE_ClearHashTable(KeHashTable *table)
{
KeHashNode *node, *next;
/* Free every entry in the table. */
for (uint32_t i = 0; i < table->num_buckets; i++)
{
node = table->buckets[i];
while (node != NULL)
{
next = node->next;
ke_CleanUpHashNode(table, node);
node = next;
}
}
/* Leave the table empty but usable. */
memset(table->buckets, 0, sizeof(KeHashNode *) * table->num_buckets);
table->num_entries = 0;
}
#if defined _MSC_VER && (defined _M_IX86 || defined _M_AMD64 || defined _M_X64)
#pragma intrinsic(_rotl)
#endif
uint32_t Knight::KE_HashString(const void *str)
{
uint32_t h;
const unsigned char *us;
h = 0;
for (us = (const unsigned char *)str; *us != 0; us++)
{
#if defined _MSC_VER && (defined _M_IX86 || defined _M_AMD64 || defined _M_X64)
h = _rotl(h, 4) ^ *us;
#else
h = ((h << 4) | (h >> 28)) ^ *us;
#endif
}
return h;
}
bool Knight::KE_AreStringsEqual(const void* str1, const void* str2)
{
return (strcmp((const char*)str1, (const char*)str2) == 0) ? true : false;
}

View File

@ -1,139 +0,0 @@
#ifndef _INCLUDE_KNIGHT_KE_HASHTABLE_H_
#define _INCLUDE_KNIGHT_KE_HASHTABLE_H_
#include <stddef.h>
#include <stdint.h>
#include <KnightAllocator.h>
namespace Knight
{
class KeHashTable;
/**
* @brief Must generate a hash function given a key.
*
* @param key Pointer to the key.
* @return Hash value.
*/
typedef uint32_t (*KeHashGenerator)(const void *key);
/**
* @brief Must compare two values.
*
* @param val1 First value.
* @param val2 Second value.
* @return True if equal, false if not.
*/
typedef bool (*KeHashComparator)(const void *val1, const void *val2);
/**
* @brief Must call the destructor of the given data, and free if necessary.
*
* @param val Pointer to data.
*/
typedef void (*KeHashDestructor)(const void *val);
/**
* @brief Must transfer the contents of an object from the source to the destination.
*
* @param dest Destination address.
* @param source Source address.
*/
typedef void (*KeHashCopyCtor)(void *dest, const void *source);
/**
* @brief Contains information about how to process keys and values in a hash table.
*/
struct KeHashMarshal
{
size_t bytes; /**< Bytes of storage needed (0 to use pointers). */
KeHashComparator cmp; /**< Comparator (if NULL, void * comparison used) */
KeHashDestructor dtor; /**< Optional function for performing dtor cleanup. */
KeHashCopyCtor ctor; /**< If bytes != 0, must be a valid function
(ignored otherwise). */
};
/**
* @brief Creates a new hash table structure.
*
* @param bits Dictates starting number of buckets as a power of two.
* Pass 0 for the default (which is 4).
* @param key_gen Key generation function.
* @param key_marshal Structure detailing how to marshal keys.
* @param val_marshal Structure detailing how to marshal values.
* @param nodeAlloc Node allocator (can be NULL for malloc/free).
* @param keep_free_list True to keep a free list of nodes, false otherwise.
* @return New hash table container.
*/
extern KeHashTable *KE_CreateHashTable(
uint32_t bits,
KeHashGenerator key_gen,
const KeHashMarshal *key_marshal,
const KeHashMarshal *val_marshal,
ke_allocator_t *nodeAlloc,
bool keep_free_list
);
/**
* @brief Destroys a hash table.
*
* @param table Hash table.
*/
extern void KE_DestroyHashTable(KeHashTable *table);
/**
* @brief Adds a key/value to the hash table. If the pair already exists, the old value
* is overwritten (calling any destructors as necessary).
*
* @param table Hash table.
* @param key Key pointer.
* @param val Value pointer.
*/
extern void KE_AddToHashTable(KeHashTable *table, const void *key, void *val);
/**
* @brief Removes a key entry from the hash table.
*
* @param table Hash table.
* @param key Key pointer.
*/
extern void KE_RemoveFromHashTable(KeHashTable *table, const void *key);
/**
* @brief Finds an entry in the hash table.
*
* @param table Hash table.
* @param key Key pointer.
* @param value Pointer to store the value (optional).
* @return On success, true is returned and value is filled if given.
* On failure, false is returned and outputs are undefined.
*/
extern bool KE_FindInHashTable(KeHashTable *table, const void *key, void **value);
/**
* @brief Clears all entries in the hash table (caching free entries when possible).
*
* @param table Hash table.
*/
extern void KE_ClearHashTable(KeHashTable *table);
/**
* @brief Generic function for hashing strings.
*
* @param str Key string.
* @return Hash value.
*/
extern uint32_t KE_HashString(const void *str);
/**
* @brief Generic case-sensitive comparison of strings.
*
* @param str1 First string.
* @param str2 Second string.
* @return True if equal, false otherwise.
*/
extern bool KE_AreStringsEqual(const void* str1, const void* str2);
}
#endif //_INCLUDE_KNIGHT_KE_HASHTABLE_H_
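
A short usage sketch of the hash table API being removed, using the provided string hash and comparator; the marshal settings and names below are illustrative (pointer keys and values, no copying):

#include <KeHashTable.h>
using namespace Knight;

void HashTableExample()
{
    KeHashMarshal key_m = { 0, KE_AreStringsEqual, NULL, NULL }; /* pointer keys, string compare */
    KeHashMarshal val_m = { 0, NULL, NULL, NULL };               /* pointer values */
    KeHashTable *table = KE_CreateHashTable(0, KE_HashString, &key_m, &val_m, NULL, false);

    int answer = 42;
    KE_AddToHashTable(table, "answer", &answer);

    void *value;
    if (KE_FindInHashTable(table, "answer", &value))
    {
        /* value == &answer */
    }
    KE_RemoveFromHashTable(table, "answer");
    KE_DestroyHashTable(table);
}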

View File

@ -1,12 +0,0 @@
#ifndef _INCLUDE_KNIGHT_LINKING_H_
#define _INCLUDE_KNIGHT_LINKING_H_
#if defined KE_LINK_IMPORT
#error "Not yet supported"
#elif defined KE_LINK_EXPORT
#error "Not yet supported"
#else
#define KE_LINK
#endif
#endif //_INCLUDE_KNIGHT_LINKING_H_

View File

@ -1,263 +0,0 @@
#include "KePlatform.h"
#include <KeLumpAllocator.h>
#include <KeVector.h>
using namespace Knight;
/**
* :TODO: don't make this part of the page, because touching it means
* dirtying a page. Instead, we should have a separate linked list.
* Maybe that linked list itself could be marshalled from one page.
*/
struct KeLumpRegion
{
char *base;
char *cur;
size_t size;
size_t avail;
KeLumpRegion *next;
KeLumpRegion *prev;
};
class KeLumpAllocator
{
public:
KeLumpAllocator() : m_pUsableRegions(NULL), m_pUnusableRegions(NULL)
{
m_DefLumpSize = 65536;
#if defined KE_PLATFORM_WINDOWS
SYSTEM_INFO info;
GetSystemInfo(&info);
if (info.dwAllocationGranularity > m_DefLumpSize)
{
m_DefLumpSize = info.dwAllocationGranularity;
}
#endif
}
~KeLumpAllocator()
{
FreeRegionChain(m_pUsableRegions);
FreeRegionChain(m_pUnusableRegions);
}
void Reset()
{
KeLumpRegion *region;
/* Find the tail of the usable regions. */
region = m_pUsableRegions;
while (region != NULL)
{
if (region->next == NULL)
{
break;
}
region = region->next;
}
/* Link the unusable chain into the usable chain. */
if (region == NULL)
{
m_pUsableRegions = m_pUnusableRegions;
}
else
{
region->next = m_pUnusableRegions;
if (m_pUnusableRegions != NULL)
{
m_pUnusableRegions->prev = region;
}
}
m_pUnusableRegions = NULL;
region = m_pUsableRegions;
while (region != NULL)
{
region->avail = region->size;
region->cur = region->base;
region = region->next;
}
}
void FreeRegionChain(KeLumpRegion *region)
{
KeLumpRegion *next;
while (region != NULL)
{
next = region->next;
#if defined KE_PLATFORM_WINDOWS
VirtualFree(region, 0, MEM_RELEASE);
#else
free(region);
#endif
region = next;
}
}
void *Alloc(size_t size)
{
char *blob;
KeLumpRegion *region;
if (size % 8 != 0)
{
size += 8;
size -= size % 8;
}
region = FindRegionForSize(size);
blob = region->cur;
region->avail -= size;
region->cur += size;
/**
* Technically we could make one last small allocation, but
* this edge case is not worth the extra work.
*/
if (region->avail < 8)
{
/* Unlink us from the usable list */
if (region == m_pUsableRegions)
{
m_pUsableRegions = m_pUsableRegions->next;
if (m_pUsableRegions != NULL)
{
m_pUsableRegions->prev = NULL;
}
}
else
{
region->prev->next = region->next;
if (region->next != NULL)
{
region->next->prev = region->prev;
}
}
/* Link us into the unusable list */
region->prev = NULL;
region->next = m_pUnusableRegions;
if (m_pUnusableRegions != NULL)
{
m_pUnusableRegions->prev = region;
}
m_pUnusableRegions = region;
}
return blob;
}
private:
KeLumpRegion *FindRegionForSize(size_t size)
{
char *base;
KeLumpRegion *region;
size_t size_of_region;
/**
* :TODO: replace this with a priority queue or something
* that's actually fast. Even worse is we dirty pages by
* doing this. Ouch!
*/
region = m_pUsableRegions;
while (region != NULL)
{
if (region->avail >= size)
{
return region;
}
region = region->next;
}
/* Make sure regions end at 8-byte alignment. */
size_of_region = sizeof(KeLumpRegion);
if (size_of_region % 8 != 0)
{
size_of_region += 8;
size_of_region -= size_of_region % 8;
}
/* If the size is too big, fix that. */
if (size > m_DefLumpSize - size_of_region)
{
size += m_DefLumpSize;
size -= size % m_DefLumpSize;
}
else
{
size = m_DefLumpSize;
}
#if defined KE_PLATFORM_WINDOWS
base = (char *)VirtualAlloc(
NULL,
size,
MEM_COMMIT|MEM_RESERVE,
PAGE_READWRITE);
#else
base = (char*)valloc(size);
#endif
/* Initialize the region */
region = (KeLumpRegion *)base;
region->base = &base[size_of_region];
region->size = size - size_of_region;
region->cur = region->base;
region->avail = region->size;
region->prev = NULL;
region->next = m_pUsableRegions;
if (m_pUsableRegions != NULL)
{
m_pUsableRegions->prev = region;
}
m_pUsableRegions = region;
return region;
}
private:
KeLumpRegion *m_pUsableRegions;
KeLumpRegion *m_pUnusableRegions;
size_t m_DefLumpSize;
};
inline KeLumpAllocator *ke_LumpFromAllocator(ke_allocator_t *arena)
{
return (KeLumpAllocator *)arena->user;
}
void *ke_LumpAlloc(ke_allocator_t *arena, size_t size)
{
return ke_LumpFromAllocator(arena)->Alloc(size);
}
void ke_LumpFree(ke_allocator_t *arena, void *ptr)
{
}
ke_allocator_t * KE_LINK Knight::KE_CreateLumpAllocator()
{
ke_allocator_t *alloc;
alloc = new ke_allocator_t;
alloc->alloc = ke_LumpAlloc;
alloc->dealloc = ke_LumpFree;
alloc->user = new KeLumpAllocator();
return alloc;
}
void KE_LINK Knight::KE_DestroyLumpAllocator(ke_allocator_t *alloc)
{
delete ke_LumpFromAllocator(alloc);
delete alloc;
}
void KE_LINK Knight::KE_ResetLumpAllocator(ke_allocator_t *alloc)
{
ke_LumpFromAllocator(alloc)->Reset();
}

View File

@ -1,36 +0,0 @@
#ifndef _INCLUDE_KNIGHT_KE_LUMP_ALLOCATOR_H_
#define _INCLUDE_KNIGHT_KE_LUMP_ALLOCATOR_H_
#include <KeLinking.h>
#include <KnightAllocator.h>
namespace Knight
{
/**
* @brief Creates a new lump allocator.
*
* The lump allocator is intended for cases where there are many allocations
* and none need to be freed. There is memory wastage, and the lump allocator
* is typically thrown away after use.
*
* @return New lump allocator.
*/
extern ke_allocator_t * KE_CreateLumpAllocator();
/**
* @brief Destroys a lump allocator, freeing all of its resources.
*
* @param alloc Lump allocator.
*/
extern void KE_DestroyLumpAllocator(ke_allocator_t *alloc);
/**
* @brief Clears a lump allocator, so its memory can be re-used from
* the start.
*
* @param alloc Lump allocator.
*/
extern void KE_ResetLumpAllocator(ke_allocator_t *alloc);
}
#endif //_INCLUDE_KNIGHT_KE_LUMP_ALLOCATOR_H_
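
A minimal sketch of how the lump allocator was meant to be used: many small allocations, released all at once. The node type and loop are illustrative:

#include <KeLumpAllocator.h>
using namespace Knight;

struct ListNode { int value; ListNode *next; };

void LumpExample()
{
    ke_allocator_t *lump = KE_CreateLumpAllocator();
    ListNode *head = NULL;
    for (int i = 0; i < 1000; i++)
    {
        ListNode *node = new (lump) ListNode; /* placement new from KnightAllocator.h */
        node->value = i;
        node->next = head;
        head = node;
    }
    KE_DestroyLumpAllocator(lump); /* frees every node in one shot */
}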

View File

@ -1,148 +0,0 @@
#include "KePageAllocator.h"
using namespace Knight;
struct PageInfo
{
PageInfo *next;
void *base;
};
class Knight::KePageAllocator
{
public:
size_t page_size;
size_t page_granularity;
PageInfo *free_pages;
PageInfo *page_blocks;
};
static void *ke_LumpPageAlloc(KePageAllocator *alloc)
{
void *base;
char *page;
PageInfo *lump;
size_t pagesInBlock;
#if defined KE_PLATFORM_WINDOWS
base = VirtualAlloc(
NULL,
alloc->page_granularity,
MEM_COMMIT|MEM_RESERVE,
PAGE_READWRITE);
#elif defined KE_PLATFORM_POSIX
base = valloc(alloc->page_granularity);
#else
#error "Unsupported platform"
#endif
if (base == NULL)
{
return NULL;
}
lump = new PageInfo;
lump->base = base;
lump->next = alloc->page_blocks;
alloc->page_blocks = lump;
page = (char *)base + alloc->page_size;
pagesInBlock = alloc->page_granularity / alloc->page_size;
for (size_t i = 1; i < pagesInBlock; i++)
{
lump = new PageInfo;
lump->base = page;
lump->next = alloc->free_pages;
alloc->free_pages = lump;
page += alloc->page_size;
}
return base;
}
KePageAllocator *Knight::KE_CreatePageAllocator()
{
KePageAllocator *alloc;
alloc = new KePageAllocator;
#if defined KE_PLATFORM_WINDOWS
SYSTEM_INFO info;
GetSystemInfo(&info);
alloc->page_size = info.dwPageSize;
alloc->page_granularity = info.dwAllocationGranularity;
#elif defined KE_PLATFORM_POSIX
alloc->page_size = sysconf(_SC_PAGESIZE);
alloc->page_granularity = alloc->page_size * 16;
#else
#error "Unsupported platform"
#endif
alloc->free_pages = NULL;
alloc->page_blocks = NULL;
return alloc;
}
void Knight::KE_DestroyPageAllocator(KePageAllocator *alloc)
{
PageInfo *info, *next;
info = alloc->page_blocks;
while (info != NULL)
{
next = info->next;
#if defined KE_PLATFORM_WINDOWS
VirtualFree(info->base, 0, MEM_RELEASE);
#elif defined KE_PLATFORM_POSIX
free(info->base);
#else
#error "Unsupported platform"
#endif
delete info;
info = next;
}
info = alloc->free_pages;
while (info != NULL)
{
next = info->next;
delete info;
info = next;
}
delete alloc;
}
void *Knight::KE_PageAlloc(KePageAllocator *alloc)
{
if (alloc->free_pages != NULL)
{
void *base;
PageInfo *info;
info = alloc->free_pages;
alloc->free_pages = info->next;
base = info->base;
delete info;
return base;
}
return ke_LumpPageAlloc(alloc);
}
void Knight::KE_PageFree(KePageAllocator *alloc, void *page)
{
PageInfo *info;
info = new PageInfo;
info->base = page;
info->next = alloc->free_pages;
alloc->free_pages = info;
}
size_t Knight::KE_PageSize(KePageAllocator *alloc)
{
return alloc->page_size;
}

View File

@ -1,50 +0,0 @@
#ifndef _INCLUDE_KNIGHT_KE_PAGE_ALLOCATOR_H_
#define _INCLUDE_KNIGHT_KE_PAGE_ALLOCATOR_H_
#include <stddef.h>
#include <stdint.h>
namespace Knight
{
class KePageAllocator;
/**
* @brief Creates a page allocator.
*
* @return New page allocator.
*/
extern KePageAllocator *KE_CreatePageAllocator();
/**
* @brief Destroys a page allocator, freeing all live pages it owns.
*
* @param alloc Page allocator.
*/
extern void KE_DestroyPageAllocator(KePageAllocator *alloc);
/**
* @brief Allocates a page of memory.
*
* @param alloc Page allocator.
* @return Page of memory.
*/
extern void *KE_PageAlloc(KePageAllocator *alloc);
/**
* @brief Frees a page of memory.
*
* @param alloc Page allocator.
* @param page Page of memory.
*/
extern void KE_PageFree(KePageAllocator *alloc, void *page);
/**
* @brief Returns the size of a page.
*
* @param alloc Page allocator.
* @return Page size.
*/
extern size_t KE_PageSize(KePageAllocator *alloc);
}
#endif //_INCLUDE_KNIGHT_KE_PAGE_ALLOCATOR_H_
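
A brief sketch of the page allocator API being removed; the names below are illustrative:

#include <string.h>
#include "KePageAllocator.h"
using namespace Knight;

void PageExample()
{
    KePageAllocator *alloc = KE_CreatePageAllocator();
    void *page = KE_PageAlloc(alloc);     /* one page of KE_PageSize(alloc) bytes */
    memset(page, 0, KE_PageSize(alloc));
    KE_PageFree(alloc, page);             /* back onto the free list for reuse */
    KE_DestroyPageAllocator(alloc);
}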

View File

@ -1,29 +0,0 @@
#ifndef _INCLUDE_KNIGHT_KE_PLATFORM_H_
#define _INCLUDE_KNIGHT_KE_PLATFORM_H_
#if defined WIN32
#define KE_PLATFORM_WINDOWS
#define WINDOWS_LEAN_AND_MEAN
#include <windows.h>
#include <malloc.h>
#if !defined alloca
#define alloca _alloca
#endif
#else
#define KE_PLATFORM_POSIX
#if defined __linux__
#define KE_PLATFORM_LINUX
#elif defined __APPLE__
#define KE_PLATFORM_APPLE
#else
#error "TODO"
#endif
#endif
#endif //_INCLUDE_KNIGHT_KE_PLATFORM_H_

View File

@ -1,162 +0,0 @@
#ifndef _INCLUDE_KNIGHT_KE_SECTOR_STACK_H_
#define _INCLUDE_KNIGHT_KE_SECTOR_STACK_H_
#include <KnightAllocator.h>
#include <KeVector.h>
namespace Knight
{
template <class T>
class KeSectorStack
{
public:
static const size_t DEFAULT_SECTOR_SIZE = 64;
KeSectorStack() : m_SectorSize(DEFAULT_SECTOR_SIZE), m_UsedSize(0), m_MaxUsedSize(0)
{
m_pAlloc = NULL;
}
KeSectorStack(size_t sectorSize) : m_SectorSize(sectorSize), m_UsedSize(0), m_MaxUsedSize(0)
{
m_pAlloc = NULL;
}
KeSectorStack(size_t sectorSize, ke_allocator_t *alloc) :
m_SectorSize(sectorSize), m_UsedSize(0), m_pAlloc(alloc), m_MaxUsedSize(0)
{
}
~KeSectorStack()
{
clear();
}
void clear()
{
T *sector;
size_t last_sector;
size_t last_sector_item;
if (m_MaxUsedSize == 0)
{
return;
}
last_sector = (m_MaxUsedSize - 1) / m_SectorSize;
last_sector_item = (m_MaxUsedSize - 1) % m_SectorSize;
for (size_t i = 0; i < last_sector; i++)
{
sector = m_Sectors[i];
for (size_t j = 0; j < m_SectorSize; j++)
{
sector[j].~T();
}
}
sector = m_Sectors[last_sector];
for (size_t i = 0; i <= last_sector_item; i++)
{
sector[i].~T();
}
clear_no_dtors();
}
void clear_no_dtors()
{
for (size_t i = 0; i < m_Sectors.size(); i++)
{
free_sector(m_Sectors[i]);
}
m_Sectors.clear();
m_UsedSize = 0;
m_MaxUsedSize = 0;
}
bool empty()
{
return (m_UsedSize == 0) ? true : false;
}
void push(const T & val)
{
if ((m_UsedSize / m_SectorSize) >= m_Sectors.size())
{
/* Create a new sector */
T * sector;
if (m_pAlloc == NULL)
{
sector = (T *)malloc(sizeof(T) * m_SectorSize);
}
else
{
sector = (T *)m_pAlloc->alloc(m_pAlloc, sizeof(T) * m_SectorSize);
}
m_Sectors.push_back(sector);
}
if (m_UsedSize >= m_MaxUsedSize)
{
/* Slot has never been constructed; construct in place. */
new (&at(m_UsedSize)) T(val);
}
else
{
/* Slot still holds a live object from an earlier push; assign over it. */
at(m_UsedSize) = val;
}
m_UsedSize++;
/* Keep track of the maximum used size so we can defer the
* massive destruction job until the end.
*/
if (m_UsedSize > m_MaxUsedSize)
{
m_MaxUsedSize = m_UsedSize;
}
}
void pop()
{
m_UsedSize--;
}
void pop_all()
{
m_UsedSize = 0;
}
T & front()
{
return at(m_UsedSize - 1);
}
size_t size()
{
return m_UsedSize;
}
private:
T & at(size_t x)
{
return m_Sectors[x / m_SectorSize][x % m_SectorSize];
}
void free_sector(T * sector)
{
if (m_pAlloc == NULL)
{
free(sector);
}
else if (m_pAlloc->dealloc != NULL)
{
m_pAlloc->dealloc(m_pAlloc, sector);
}
}
private:
KeVector<T *> m_Sectors;
size_t m_SectorSize;
size_t m_UsedSize;
ke_allocator_t *m_pAlloc;
size_t m_MaxUsedSize;
};
}
#endif //_INCLUDE_KNIGHT_KE_SECTOR_STACK_H_
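
A small sketch of the sector stack in use; the sector size and element type are illustrative:

#include <KeSectorStack.h>
using namespace Knight;

void StackExample()
{
    KeSectorStack<int> stack(64); /* 64 ints per sector */
    for (int i = 0; i < 10; i++)
        stack.push(i);
    while (!stack.empty())
    {
        int top = stack.front(); /* most recently pushed item */
        (void)top;
        stack.pop();
    }
}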

View File

@ -1,158 +0,0 @@
#ifndef _INCLUDE_KNIGHT_KE_VECTOR_H_
#define _INCLUDE_KNIGHT_KE_VECTOR_H_
#include <new>
#include <stdlib.h>
namespace Knight
{
template <class T>
class KeVector
{
public:
KeVector<T>() : m_Data(NULL), m_Size(0), m_CurrentUsedSize(0)
{
}
KeVector<T>(const KeVector<T> & other)
{
m_Size = other.m_CurrentUsedSize;
m_CurrentUsedSize = other.m_CurrentUsedSize;
if (m_Size > 0)
{
m_Data = (T *)malloc(sizeof(T) * m_Size);
for (size_t i = 0; i < m_Size; i++)
{
new (&m_Data[i]) T(other.m_Data[i]);
}
}
else
{
m_Data = NULL;
}
}
~KeVector<T>()
{
clear();
}
KeVector & operator =(const KeVector<T> & other)
{
if (this == &other)
{
return *this;
}
clear();
if (other.m_CurrentUsedSize)
{
m_Data = (T *)malloc(sizeof(T) * other.m_CurrentUsedSize);
m_Size = other.m_CurrentUsedSize;
m_CurrentUsedSize = other.m_CurrentUsedSize;
for (size_t i = 0; i < m_Size; i++)
{
new (&m_Data[i]) T(other.m_Data[i]);
}
}
return *this;
}
size_t size() const
{
return m_CurrentUsedSize;
}
void push_back(const T & elem)
{
GrowIfNeeded(1);
new (&m_Data[m_CurrentUsedSize]) T(elem);
m_CurrentUsedSize++;
}
void pop_back()
{
if (m_CurrentUsedSize == 0)
{
return;
}
m_CurrentUsedSize--;
m_Data[m_CurrentUsedSize].~T();
}
bool is_empty()
{
return (m_CurrentUsedSize == 0);
}
T & operator [](size_t pos)
{
return m_Data[pos];
}
const T & operator [](size_t pos) const
{
return m_Data[pos];
}
void clear()
{
for (size_t i = 0; i < m_CurrentUsedSize; i++)
{
m_Data[i].~T();
}
free(m_Data);
m_Data = NULL;
m_Size = 0;
m_CurrentUsedSize = 0;
}
private:
void Grow(size_t amount)
{
T *new_data;
size_t new_size;
if (m_Size == 0)
{
new_size = 8;
}
else
{
new_size = m_Size * 2;
}
while (m_CurrentUsedSize + amount > new_size)
{
new_size *= 2;
}
new_data = (T *)malloc(sizeof(T) * new_size);
for (size_t i = 0; i < m_CurrentUsedSize; i++)
{
new (&new_data[i]) T(m_Data[i]);
m_Data[i].~T();
}
free(m_Data);
m_Data = new_data;
m_Size = new_size;
}
void GrowIfNeeded(size_t amount)
{
if (m_CurrentUsedSize + amount >= m_Size)
{
Grow(amount);
}
}
private:
T *m_Data;
size_t m_Size;
size_t m_CurrentUsedSize;
};
}
#endif //_INCLUDE_KNIGHT_KE_VECTOR_H_
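
A short sketch of KeVector in use; the names below are illustrative:

#include <KeVector.h>
using namespace Knight;

void VectorExample()
{
    KeVector<int> squares;
    for (int i = 0; i < 5; i++)
        squares.push_back(i * i);
    size_t total = 0;
    for (size_t i = 0; i < squares.size(); i++)
        total += squares[i];
    squares.clear(); /* runs destructors and releases storage */
    (void)total;
}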

View File

@ -1,37 +0,0 @@
#ifndef _INCLUDE_KNIGHT_ALLOCATOR_H_
#define _INCLUDE_KNIGHT_ALLOCATOR_H_
#include <stddef.h>
#include <stdlib.h>
struct ke_allocator_s;
typedef struct ke_allocator_s ke_allocator_t;
typedef void *(*KEFN_ALLOCATOR)(ke_allocator_t *, size_t);
typedef void (*KEFN_DEALLOCATOR)(ke_allocator_t *, void *);
struct ke_allocator_s
{
KEFN_ALLOCATOR alloc;
KEFN_DEALLOCATOR dealloc;
void *user;
};
inline void *operator new(size_t size, ke_allocator_t *alloc)
{
return alloc->alloc(alloc, size);
}
inline void *operator new [](size_t size, ke_allocator_t *alloc)
{
return alloc->alloc(alloc, size);
}
template <typename T>
void ke_destroy(ke_allocator_t *alloc, T * data)
{
data->~T();
alloc->dealloc(alloc, data);
}
#endif //_INCLUDE_KNIGHT_ALLOCATOR_H_
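
A minimal sketch of the allocator interface defined here, wiring a malloc/free pair into ke_allocator_t and pairing the placement new with ke_destroy; the hook and struct names are illustrative:

#include <KnightAllocator.h>

static void *ExampleAlloc(ke_allocator_t *, size_t amt) { return malloc(amt); }
static void ExampleFree(ke_allocator_t *, void *addr) { free(addr); }

struct Widget { int id; };

void AllocatorExample()
{
    ke_allocator_t alloc = { ExampleAlloc, ExampleFree, NULL };
    Widget *w = new (&alloc) Widget;   /* routed through alloc.alloc */
    w->id = 1;
    ke_destroy(&alloc, w);             /* runs ~Widget(), then alloc.dealloc */
}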

View File

@ -9,7 +9,6 @@ Includes = [
os.path.join(builder.sourcePath, 'public', 'amtl'),
os.path.join(builder.sourcePath, 'public', 'jit'),
os.path.join(builder.sourcePath, 'public', 'jit', 'x86'),
os.path.join(builder.sourcePath, 'knight', 'shared'),
# The include path for SP v2 stuff.
os.path.join(builder.sourcePath, 'sourcepawn', 'include'),
@ -31,6 +30,7 @@ def setup(binary):
library = setup(builder.compiler.StaticLibrary('sourcepawn'))
library.sources += [
'api.cpp',
'code-allocator.cpp',
'plugin-runtime.cpp',
'compiled-function.cpp',
'debug-trace.cpp',
@ -54,7 +54,6 @@ library.sources += [
'zlib/uncompr.c',
'zlib/zutil.c',
'md5/md5.cpp',
'../../knight/shared/KeCodeAllocator.cpp',
'../../public/jit/x86/assembler-x86.cpp',
]
libsourcepawn = builder.Add(library).binary

View File

@ -13,7 +13,6 @@
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <KeCodeAllocator.h>
#include "x86/jit_x86.h"
#include "environment.h"
#include "api.h"
@ -74,7 +73,7 @@ SourcePawnEngine::ExecAlloc(size_t size)
void *
SourcePawnEngine::AllocatePageMemory(size_t size)
{
return g_Jit.AllocCode(size);
return Environment::get()->AllocateCode(size);
}
void
@ -92,7 +91,7 @@ SourcePawnEngine::SetReadWrite(void *ptr)
void
SourcePawnEngine::FreePageMemory(void *ptr)
{
g_Jit.FreeCode(ptr);
Environment::get()->FreeCode(ptr);
}
void

View File

@ -1,18 +1,16 @@
#include <KePlatform.h>
#include <assert.h>
#include <string.h>
#include <am-utility.h>
#if defined KE_PLATFORM_WINDOWS
#if defined(WIN32)
#include <windows.h>
#elif defined KE_PLATFORM_POSIX
#else
#include <unistd.h>
#include <stdlib.h>
#include <sys/mman.h>
#else
#error "TODO"
#endif
#include "KeCodeAllocator.h"
#include "code-allocator.h"
#define ALIGNMENT 16
@ -91,7 +89,7 @@ KeCodeCache *Knight::KE_CreateCodeCache()
memset(cache, 0, sizeof(KeCodeCache));
#if defined KE_PLATFORM_WINDOWS
#if defined(WIN32)
SYSTEM_INFO info;
GetSystemInfo(&info);
@ -264,13 +262,11 @@ KeCodeRegion *ke_AddRegionForSize(KeCodeCache *cache, size_t size)
size += cache->page_granularity * 2;
size -= size % cache->page_granularity;
#if defined KE_PLATFORM_WINDOWS
#if defined(WIN32)
region->block_start = (unsigned char *)VirtualAlloc(NULL, size, MEM_COMMIT|MEM_RESERVE, PAGE_EXECUTE_READWRITE);
#elif defined KE_PLATFORM_POSIX
#else
region->block_start = (unsigned char *)mmap(NULL, size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANON, -1, 0);
region->block_start = (region->block_start == MAP_FAILED) ? NULL : region->block_start;
#else
#error "TODO"
#endif
if (region->block_start == NULL)
@ -381,7 +377,7 @@ KeCodeRegion *ke_DestroyRegion(KeCodeRegion *region)
next = region->next;
#if defined KE_PLATFORM_WINDOWS
#if defined(WIN32)
VirtualFree(region->block_start, 0, MEM_RELEASE);
#else
munmap(region->block_start, region->total_size);

View File

@ -1,7 +1,6 @@
#ifndef _INCLUDE_KNIGHT_KE_CODE_ALLOCATOR_H_
#define _INCLUDE_KNIGHT_KE_CODE_ALLOCATOR_H_
#include <KeLinking.h>
#include <stddef.h>
#include <stdint.h>

View File

@ -11,7 +11,9 @@
// SourcePawn. If not, see http://www.gnu.org/licenses/.
//
#include "compiled-function.h"
#include "x86/jit_x86.h"
#include "environment.h"
using namespace sp;
CompiledFunction::CompiledFunction(void *entry_addr, cell_t pcode_offs, FixedArray<LoopEdge> *edges)
: entry_(entry_addr),
@ -22,5 +24,5 @@ CompiledFunction::CompiledFunction(void *entry_addr, cell_t pcode_offs, FixedArr
CompiledFunction::~CompiledFunction()
{
g_Jit.FreeCode(entry_);
Environment::get()->FreeCode(entry_);
}

View File

@ -26,7 +26,8 @@ Environment::Environment()
: debugger_(nullptr),
profiler_(nullptr),
jit_enabled_(true),
profiling_enabled_(false)
profiling_enabled_(false),
code_pool_(nullptr)
{
}
@ -41,14 +42,14 @@ Environment::New()
if (sEnvironment)
return nullptr;
Environment *env = new Environment();
if (!env->Initialize()) {
delete env;
sEnvironment = new Environment();
if (!sEnvironment->Initialize()) {
delete sEnvironment;
sEnvironment = nullptr;
return nullptr;
}
sEnvironment = env;
return env;
return sEnvironment;
}
Environment *
@ -64,6 +65,10 @@ Environment::Initialize()
api_v2_ = new SourcePawnEngine2();
watchdog_timer_ = new WatchdogTimer();
if ((code_pool_ = Knight::KE_CreateCodeCache()) == nullptr)
return false;
// Safe to initialize JIT now that we have the code cache.
if (!g_Jit.InitializeJIT())
return false;
@ -75,6 +80,10 @@ Environment::Shutdown()
{
watchdog_timer_->Shutdown();
g_Jit.ShutdownJIT();
Knight::KE_DestroyCodeCache(code_pool_);
assert(sEnvironment == this);
sEnvironment = nullptr;
}
void
@ -160,3 +169,15 @@ Environment::ReportError(PluginRuntime *runtime, int err, const char *errstr, ce
debugger_->OnContextExecuteError(runtime->GetDefaultContext(), &trace);
}
void *
Environment::AllocateCode(size_t size)
{
return Knight::KE_AllocCode(code_pool_, size);
}
void
Environment::FreeCode(void *code)
{
Knight::KE_FreeCode(code_pool_, code);
}
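
With this change, callers obtain executable memory from the Environment instead of the JIT's global code cache; a hedged sketch of the new call path (the stub functions below are illustrative):

#include "environment.h"
using namespace sp;

void *AllocateStub(size_t size)
{
    /* Executable memory now comes from the Environment-owned code pool. */
    return Environment::get()->AllocateCode(size);
}

void ReleaseStub(void *code)
{
    Environment::get()->FreeCode(code);
}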

View File

@ -15,6 +15,7 @@
#include <sp_vm_api.h>
#include <am-utility.h> // Replace with am-cxx later.
#include "code-allocator.h"
class PluginRuntime;
@ -54,6 +55,10 @@ class Environment : public ISourcePawnEnvironment
const char *GetErrorString(int err);
void ReportError(PluginRuntime *runtime, int err, const char *errstr, cell_t rp_start);
// Allocate and free executable memory.
void *AllocateCode(size_t size);
void FreeCode(void *code);
// Helpers.
void SetProfiler(IProfilingTool *profiler) {
profiler_ = profiler;
@ -96,6 +101,8 @@ class Environment : public ISourcePawnEnvironment
IProfilingTool *profiler_;
bool jit_enabled_;
bool profiling_enabled_;
Knight::KeCodeCache *code_pool_;
};
class EnterProfileScope

View File

@ -40,7 +40,6 @@
#include "environment.h"
using namespace sp;
using namespace Knight;
#if defined USE_UNGEN_OPCODES
#include "ungen_opcodes.h"
@ -49,7 +48,6 @@ using namespace Knight;
#define __ masm.
JITX86 g_Jit;
KeCodeCache *g_pCodeCache = NULL;
static inline uint8_t *
LinkCode(AssemblerX86 &masm)
@ -57,7 +55,7 @@ LinkCode(AssemblerX86 &masm)
if (masm.outOfMemory())
return NULL;
void *code = Knight::KE_AllocCode(g_pCodeCache, masm.length());
void *code = Environment::get()->AllocateCode(masm.length());
if (!code)
return NULL;
@ -1901,8 +1899,6 @@ JITX86::JITX86()
bool
JITX86::InitializeJIT()
{
g_pCodeCache = KE_CreateCodeCache();
m_pJitEntry = GenerateEntry(&m_pJitReturn, &m_pJitTimeout);
if (!m_pJitEntry)
return false;
@ -1913,7 +1909,6 @@ JITX86::InitializeJIT()
if (!code)
return false;
MacroAssemblerX86::RunFeatureDetection(code);
KE_FreeCode(g_pCodeCache, code);
return true;
}
@ -1921,7 +1916,6 @@ JITX86::InitializeJIT()
void
JITX86::ShutdownJIT()
{
KE_DestroyCodeCache(g_pCodeCache);
}
CompiledFunction *
@ -1981,7 +1975,7 @@ JITX86::CreateFakeNative(SPVM_FAKENATIVE_FUNC callback, void *pData)
void
JITX86::DestroyFakeNative(SPVM_NATIVE_FUNC func)
{
KE_FreeCode(g_pCodeCache, (void *)func);
Environment::get()->FreeCode((void *)func);
}
ICompilation *
@ -2046,18 +2040,6 @@ JITX86::InvokeFunction(PluginRuntime *runtime, CompiledFunction *fn, cell_t *res
return err;
}
void *
JITX86::AllocCode(size_t size)
{
return Knight::KE_AllocCode(g_pCodeCache, size);
}
void
JITX86::FreeCode(void *code)
{
KE_FreeCode(g_pCodeCache, code);
}
void
JITX86::RegisterRuntime(PluginRuntime *rt)
{

View File

@ -19,7 +19,6 @@
#include <sp_vm_types.h>
#include <sp_vm_api.h>
#include <KeCodeAllocator.h>
#include <macro-assembler-x86.h>
#include <am-vector.h>
#include "jit_shared.h"
@ -173,9 +172,6 @@ class JITX86
ExternalAddress GetUniversalReturn() {
return ExternalAddress(m_pJitReturn);
}
void *AllocCode(size_t size);
void FreeCode(void *code);
uintptr_t FrameId() const {
return frame_id_;
}
@ -203,7 +199,6 @@ const Register dat = esi;
const Register tmp = ecx;
const Register frm = ebx;
extern Knight::KeCodeCache *g_pCodeCache;
extern JITX86 g_Jit;
#endif //_INCLUDE_SOURCEPAWN_JIT_X86_H_