First draft of dynamic detours using Ayuto's DynamicHooks library

https://github.com/Ayuto/DynamicHooks
Peace-Maker 2016-12-11 22:02:10 -07:00
parent fd8866a540
commit 2e52ab24b7
84 changed files with 57658 additions and 114 deletions

View File

@@ -238,8 +238,11 @@ class DHooksConfig(object):
os.path.join(self.sm_root, 'sourcepawn', 'include'),
os.path.join(self.sm_root, 'sourcepawn', 'vm'),
os.path.join(self.sm_root, 'sourcepawn', 'vm', 'x86'),
os.path.join(self.sm_root, 'public', 'amtl', 'include'),
os.path.join(self.sm_root, 'public', 'amtl', 'amtl'),
os.path.join(builder.currentSourcePath, 'DynamicHooks', 'thirdparty'),
os.path.join(builder.currentSourcePath, 'DynamicHooks', 'thirdparty', 'AsmJit'),
os.path.join(builder.currentSourcePath, 'DynamicHooks'),
]
@@ -256,6 +259,45 @@ program.sources += [
'natives.cpp',
'vhook.cpp',
'util.cpp',
'dynhooks_sourcepawn.cpp',
]
# DynamicHooks
program.sources += [
os.path.join('DynamicHooks', 'asm.cpp'),
os.path.join('DynamicHooks', 'hook.cpp'),
os.path.join('DynamicHooks', 'manager.cpp'),
os.path.join('DynamicHooks', 'registers.cpp'),
os.path.join('DynamicHooks', 'utilities.cpp'),
os.path.join('DynamicHooks', 'conventions', 'x86MsCdecl.cpp'),
os.path.join('DynamicHooks', 'conventions', 'x86MsStdcall.cpp'),
os.path.join('DynamicHooks', 'conventions', 'x86MsThiscall.cpp'),
]
# ASMJit
program.sources += [
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'base', 'assembler.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'base', 'compiler.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'base', 'compilercontext.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'base', 'constpool.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'base', 'containers.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'base', 'cpuinfo.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'base', 'globals.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'base', 'hlstream.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'base', 'logger.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'base', 'operand.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'base', 'podvector.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'base', 'runtime.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'base', 'utils.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'base', 'vmem.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'base', 'zone.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'x86', 'x86assembler.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'x86', 'x86compiler.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'x86', 'x86compilercontext.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'x86', 'x86compilerfunc.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'x86', 'x86inst.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'x86', 'x86operand.cpp'),
os.path.join('DynamicHooks', 'thirdparty', 'AsmJit', 'x86', 'x86operand_regs.cpp'),
]
program.sources += [os.path.join(DHooks.sm_root, 'public', 'smsdk_ext.cpp')]

457
DynamicHooks/asm.cpp Normal file
View File

@@ -0,0 +1,457 @@
/**
* =============================================================================
* DynamicHooks
* Copyright (C) 2015 Robin Gohmert. All rights reserved.
* =============================================================================
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
* asm.h/cpp from devmaster.net (thanks cybermind) edited by pRED* to handle gcc
* -fPIC thunks correctly
*
* Idea and trampoline code taken from DynDetours (thanks your-name-here).
*/
// ============================================================================
// >> INCLUDES
// ============================================================================
#include "asm.h"
#ifndef _WIN32
#include <inttypes.h>
#ifndef _GNU_SOURCE
# define _GNU_SOURCE
#endif // _GNU_SOURCE
#include <dlfcn.h>
#include <string.h>
#define REG_EAX 0
#define REG_ECX 1
#define REG_EDX 2
#define REG_EBX 3
#define IA32_MOV_REG_IMM 0xB8 // encoding is +r <imm32>
#endif
/**
* Checks whether a call to a -fPIC thunk has just been written into dest.
* If found, replaces it with a direct mov that sets the required register to the value of pc.
*
* @param dest Destination buffer where a call opcode + addr (5 bytes) has just been written.
* @param pc The program counter value that needs to be set (usually the next address from the source).
* @noreturn
*/
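/*
 * Background (illustrative): with -fPIC, gcc loads the program counter
 * through tiny helper thunks such as
 *
 *     __i686.get_pc_thunk.bx:
 *         mov ebx, [esp]      ; 8B 1C 24
 *         ret                 ; C3
 *
 * so "call thunk" leaves the address of the instruction after the call in a
 * register. That only holds while the call executes at its original address,
 * so when such a call is relocated into the trampoline, the 5-byte
 * "call rel32" (E8 xx xx xx xx) is rewritten in place as an equally sized
 * "mov reg, imm32" (B8+r xx xx xx xx) that carries the original pc value.
 */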
void check_thunks(unsigned char *dest, unsigned char *pc)
{
#if defined _WIN32
return;
#else
/* Step write address back 4 to the start of the function address */
unsigned char *writeaddr = dest - 4;
unsigned char *calloffset = *(unsigned char **)writeaddr;
unsigned char *calladdr = (unsigned char *)(dest + (intptr_t)calloffset);
/* Lookup name of function being called */
if ((*calladdr == 0x8B) && (*(calladdr+2) == 0x24) && (*(calladdr+3) == 0xC3))
{
//a thunk maybe?
char movByte = IA32_MOV_REG_IMM;
/* Calculate the correct mov opcode */
switch (*(calladdr+1))
{
case 0x04:
{
movByte += REG_EAX;
break;
}
case 0x1C:
{
movByte += REG_EBX;
break;
}
case 0x0C:
{
movByte += REG_ECX;
break;
}
case 0x14:
{
movByte += REG_EDX;
break;
}
default:
{
break;
}
}
/* Move our write address back one to where the call opcode was */
writeaddr--;
/* Write our mov */
*writeaddr = movByte;
writeaddr++;
/* Write the value - The provided program counter value */
*(void **)writeaddr = (void *)pc;
writeaddr += 4;
}
return;
#endif
}
//if dest is NULL, returns minimum number of bytes needed to be copied
//if dest is not NULL, it will copy the bytes to dest as well as fix CALLs and JMPs
//http://www.devmaster.net/forums/showthread.php?t=2311
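/* Illustrative two-pass usage with the helpers in this file (target, detour
   and tramp are hypothetical; a real trampoline must live in executable
   memory, as hook.cpp does via SetMemPatchable):

     int len = copy_bytes(target, NULL, OP_JMP_SIZE);      // measure whole instructions
     unsigned char *tramp = new unsigned char[len + OP_JMP_SIZE];
     copy_bytes(target, tramp, OP_JMP_SIZE);               // relocate them
     inject_jmp(tramp + len, target + len);                // resume original code
     inject_jmp(target, detour);                           // redirect the original
*/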
int copy_bytes(unsigned char *func, unsigned char* dest, int required_len) {
int bytecount = 0;
while(bytecount < required_len && *func != 0xCC)
{
// prefixes F0h, F2h, F3h, 66h, 67h, D8h-DFh, 2Eh, 36h, 3Eh, 26h, 64h and 65h
int operandSize = 4;
int FPU = 0;
int twoByte = 0;
unsigned char opcode = 0x90;
unsigned char modRM = 0xFF;
while(*func == 0xF0 ||
*func == 0xF2 ||
*func == 0xF3 ||
(*func & 0xFC) == 0x64 ||
(*func & 0xF8) == 0xD8 ||
(*func & 0x7E) == 0x62)
{
if(*func == 0x66)
{
operandSize = 2;
}
else if((*func & 0xF8) == 0xD8)
{
FPU = *func;
if (dest)
*dest++ = *func++;
else
func++;
bytecount++;
break;
}
if (dest)
*dest++ = *func++;
else
func++;
bytecount++;
}
// two-byte opcode byte
if(*func == 0x0F)
{
twoByte = 1;
if (dest)
*dest++ = *func++;
else
func++;
bytecount++;
}
// opcode byte
opcode = *func++;
if (dest) *dest++ = opcode;
bytecount++;
// mod R/M byte
modRM = 0xFF;
if(FPU)
{
if((opcode & 0xC0) != 0xC0)
{
modRM = opcode;
}
}
else if(!twoByte)
{
if((opcode & 0xC4) == 0x00 ||
(opcode & 0xF4) == 0x60 && ((opcode & 0x0A) == 0x02 || (opcode & 0x09) == 0x09) ||
(opcode & 0xF0) == 0x80 ||
(opcode & 0xF8) == 0xC0 && (opcode & 0x0E) != 0x02 ||
(opcode & 0xFC) == 0xD0 ||
(opcode & 0xF6) == 0xF6)
{
modRM = *func++;
if (dest) *dest++ = modRM;
bytecount++;
}
}
else
{
if((opcode & 0xF0) == 0x00 && (opcode & 0x0F) >= 0x04 && (opcode & 0x0D) != 0x0D ||
(opcode & 0xF0) == 0x30 ||
opcode == 0x77 ||
(opcode & 0xF0) == 0x80 ||
(opcode & 0xF0) == 0xA0 && (opcode & 0x07) <= 0x02 ||
(opcode & 0xF8) == 0xC8)
{
// No mod R/M byte
}
else
{
modRM = *func++;
if (dest) *dest++ = modRM;
bytecount++;
}
}
// SIB
if((modRM & 0x07) == 0x04 &&
(modRM & 0xC0) != 0xC0)
{
if (dest)
*dest++ = *func++; //SIB
else
func++;
bytecount++;
}
// mod R/M displacement
// Dword displacement, no base
if((modRM & 0xC5) == 0x05) {
if (dest) {
*(unsigned int*)dest = *(unsigned int*)func;
dest += 4;
}
func += 4;
bytecount += 4;
}
// Byte displacement
if((modRM & 0xC0) == 0x40) {
if (dest)
*dest++ = *func++;
else
func++;
bytecount++;
}
// Dword displacement
if((modRM & 0xC0) == 0x80) {
if (dest) {
*(unsigned int*)dest = *(unsigned int*)func;
dest += 4;
}
func += 4;
bytecount += 4;
}
// immediate
if(FPU)
{
// Can't have immediate operand
}
else if(!twoByte)
{
if((opcode & 0xC7) == 0x04 ||
(opcode & 0xFE) == 0x6A || // PUSH/POP/IMUL
(opcode & 0xF0) == 0x70 || // Jcc
opcode == 0x80 ||
opcode == 0x83 ||
(opcode & 0xFD) == 0xA0 || // MOV
opcode == 0xA8 || // TEST
(opcode & 0xF8) == 0xB0 || // MOV
(opcode & 0xFE) == 0xC0 || // RCL
opcode == 0xC6 || // MOV
opcode == 0xCD || // INT
(opcode & 0xFE) == 0xD4 || // AAD/AAM
(opcode & 0xF8) == 0xE0 || // LOOP/JCXZ
opcode == 0xEB ||
opcode == 0xF6 && (modRM & 0x30) == 0x00) // TEST
{
if (dest)
*dest++ = *func++;
else
func++;
bytecount++;
}
else if((opcode & 0xF7) == 0xC2) // RET
{
if (dest) {
*(unsigned short*)dest = *(unsigned short*)func;
dest += 2;
}
func += 2;
bytecount += 2;
}
else if((opcode & 0xFC) == 0x80 ||
(opcode & 0xC7) == 0x05 ||
(opcode & 0xF8) == 0xB8 ||
(opcode & 0xFE) == 0xE8 || // CALL/Jcc
(opcode & 0xFE) == 0x68 ||
(opcode & 0xFC) == 0xA0 ||
(opcode & 0xEE) == 0xA8 ||
opcode == 0xC7 ||
opcode == 0xF7 && (modRM & 0x30) == 0x00)
{
if (dest) {
//Fix CALL/JMP offset
if ((opcode & 0xFE) == 0xE8) {
if (operandSize == 4)
{
*(long*)dest = ((func + *(long*)func) - dest);
//pRED* edit. func is the current address of the call address, +4 is the next instruction, so the value of $pc
check_thunks(dest+4, func+4);
}
else
*(short*)dest = ((func + *(short*)func) - dest);
} else {
if (operandSize == 4)
*(unsigned long*)dest = *(unsigned long*)func;
else
*(unsigned short*)dest = *(unsigned short*)func;
}
dest += operandSize;
}
func += operandSize;
bytecount += operandSize;
}
}
else
{
if(opcode == 0xBA || // BT
opcode == 0x0F || // 3DNow!
(opcode & 0xFC) == 0x70 || // PSLLW
(opcode & 0xF7) == 0xA4 || // SHLD
opcode == 0xC2 ||
opcode == 0xC4 ||
opcode == 0xC5 ||
opcode == 0xC6)
{
if (dest)
*dest++ = *func++;
else
func++;
}
else if((opcode & 0xF0) == 0x80) // Jcc -i
{
if (dest) {
if (operandSize == 4)
*(unsigned long*)dest = *(unsigned long*)func;
else
*(unsigned short*)dest = *(unsigned short*)func;
dest += operandSize;
}
func += operandSize;
bytecount += operandSize;
}
}
}
return bytecount;
}
//insert a specific JMP instruction at the given location
void inject_jmp(void* src, void* dest) {
*(unsigned char*)src = OP_JMP;
*(long*)((unsigned char*)src+1) = (long)((unsigned char*)dest - ((unsigned char*)src + OP_JMP_SIZE));
}
//fill a given block with NOPs
void fill_nop(void* src, unsigned int len) {
unsigned char* src2 = (unsigned char*)src;
while (len) {
*src2++ = OP_NOP;
--len;
}
}
void* eval_jump(void* src) {
unsigned char* addr = (unsigned char*)src;
if (!addr) return 0;
//import table jump
if (addr[0] == OP_PREFIX && addr[1] == OP_JMP_SEG) {
addr += 2;
addr = *(unsigned char**)addr;
//TODO: if addr points into the IAT
return *(void**)addr;
}
//8bit offset
else if (addr[0] == OP_JMP_BYTE) {
addr = &addr[OP_JMP_BYTE_SIZE] + *(char*)&addr[1];
//mangled 32bit jump?
if (addr[0] == OP_JMP) {
addr = addr + *(int*)&addr[1];
}
return addr;
}
/*
//32bit offset
else if (addr[0] == OP_JMP) {
addr = &addr[OP_JMP_SIZE] + *(int*)&addr[1];
}
*/
return addr;
}
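/* For example, a call through a Windows import table usually lands on a
 * 6-byte stub "jmp dword ptr [iat_entry]" (FF 25 <address of IAT slot>);
 * eval_jump() follows that indirection and returns the function address
 * stored in the IAT slot rather than the stub itself. */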
/*
from ms detours package
static bool detour_is_imported(PBYTE pbCode, PBYTE pbAddress)
{
MEMORY_BASIC_INFORMATION mbi;
VirtualQuery((PVOID)pbCode, &mbi, sizeof(mbi));
__try {
PIMAGE_DOS_HEADER pDosHeader = (PIMAGE_DOS_HEADER)mbi.AllocationBase;
if (pDosHeader->e_magic != IMAGE_DOS_SIGNATURE) {
return false;
}
PIMAGE_NT_HEADERS pNtHeader = (PIMAGE_NT_HEADERS)((PBYTE)pDosHeader +
pDosHeader->e_lfanew);
if (pNtHeader->Signature != IMAGE_NT_SIGNATURE) {
return false;
}
if (pbAddress >= ((PBYTE)pDosHeader +
pNtHeader->OptionalHeader
.DataDirectory[IMAGE_DIRECTORY_ENTRY_IAT].VirtualAddress) &&
pbAddress < ((PBYTE)pDosHeader +
pNtHeader->OptionalHeader
.DataDirectory[IMAGE_DIRECTORY_ENTRY_IAT].VirtualAddress +
pNtHeader->OptionalHeader
.DataDirectory[IMAGE_DIRECTORY_ENTRY_IAT].Size)) {
return true;
}
return false;
}
__except(EXCEPTION_EXECUTE_HANDLER) {
return false;
}
}
*/

70
DynamicHooks/asm.h Normal file
View File

@@ -0,0 +1,70 @@
/**
* =============================================================================
* DynamicHooks
* Copyright (C) 2015 Robin Gohmert. All rights reserved.
* =============================================================================
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
* asm.h/cpp from devmaster.net (thanks cybermind) edited by pRED* to handle gcc
* -fPIC thunks correctly
*
* Idea and trampoline code taken from DynDetours (thanks your-name-here).
*/
#ifndef __ASM_H__
#define __ASM_H__
#define OP_JMP 0xE9
#define OP_JMP_SIZE 5
#define OP_NOP 0x90
#define OP_NOP_SIZE 1
#define OP_PREFIX 0xFF
#define OP_JMP_SEG 0x25
#define OP_JMP_BYTE 0xEB
#define OP_JMP_BYTE_SIZE 2
#ifdef __cplusplus
extern "C" {
#endif
void check_thunks(unsigned char *dest, unsigned char *pc);
//if dest is NULL, returns minimum number of bytes needed to be copied
//if dest is not NULL, it will copy the bytes to dest as well as fix CALLs and JMPs
//http://www.devmaster.net/forums/showthread.php?t=2311
int copy_bytes(unsigned char *func, unsigned char* dest, int required_len);
//insert a specific JMP instruction at the given location
void inject_jmp(void* src, void* dest);
//fill a given block with NOPs
void fill_nop(void* src, unsigned int len);
//evaluate a JMP at the target
void* eval_jump(void* src);
#ifdef __cplusplus
}
#endif
#endif //__ASM_H__

216
DynamicHooks/convention.h Normal file
View File

@@ -0,0 +1,216 @@
/**
* =============================================================================
* DynamicHooks
* Copyright (C) 2015 Robin Gohmert. All rights reserved.
* =============================================================================
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
* asm.h/cpp from devmaster.net (thanks cybermind) edited by pRED* to handle gcc
* -fPIC thunks correctly
*
* Idea and trampoline code taken from DynDetours (thanks your-name-here).
*/
#ifndef _CONVENTION_H
#define _CONVENTION_H
// ============================================================================
// >> INCLUDES
// ============================================================================
#include <list>
#include <vector>
#include <stdio.h>
#include "registers.h"
// ============================================================================
// >> DataType_t
// ============================================================================
enum DataType_t
{
DATA_TYPE_VOID,
DATA_TYPE_BOOL,
DATA_TYPE_CHAR,
DATA_TYPE_UCHAR,
DATA_TYPE_SHORT,
DATA_TYPE_USHORT,
DATA_TYPE_INT,
DATA_TYPE_UINT,
DATA_TYPE_LONG,
DATA_TYPE_ULONG,
DATA_TYPE_LONG_LONG,
DATA_TYPE_ULONG_LONG,
DATA_TYPE_FLOAT,
DATA_TYPE_DOUBLE,
DATA_TYPE_POINTER,
DATA_TYPE_STRING,
DATA_TYPE_OBJECT
};
typedef struct DataTypeSized_s {
DataTypeSized_s()
{
type = DATA_TYPE_POINTER;
size = 0;
}
DataType_t type;
size_t size;
} DataTypeSized_t;
// ============================================================================
// >> FUNCTIONS
// ============================================================================
/*
Returns the size after applying alignment.
@param <size>:
The size that should be aligned.
@param <alignment>:
The alignment that should be used.
*/
inline int Align(int size, int alignment)
{
int unaligned = size % alignment;
if (unaligned == 0)
return size;
return size + (alignment - unaligned);
}
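// Worked examples: Align(1, 4) == 4, Align(4, 4) == 4, Align(6, 4) == 8.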
/*
Returns the size of a data type after applying alignment.
@param <type>:
The data type you would like to get the size of.
@param <alignment>:
The alignment that should be used.
*/
inline int GetDataTypeSize(DataTypeSized_t type, int iAlignment=4)
{
switch(type.type)
{
case DATA_TYPE_VOID: return 0;
case DATA_TYPE_BOOL: return Align(sizeof(bool), iAlignment);
case DATA_TYPE_CHAR: return Align(sizeof(char), iAlignment);
case DATA_TYPE_UCHAR: return Align(sizeof(unsigned char), iAlignment);
case DATA_TYPE_SHORT: return Align(sizeof(short), iAlignment);
case DATA_TYPE_USHORT: return Align(sizeof(unsigned short), iAlignment);
case DATA_TYPE_INT: return Align(sizeof(int), iAlignment);
case DATA_TYPE_UINT: return Align(sizeof(unsigned int), iAlignment);
case DATA_TYPE_LONG: return Align(sizeof(long), iAlignment);
case DATA_TYPE_ULONG: return Align(sizeof(unsigned long), iAlignment);
case DATA_TYPE_LONG_LONG: return Align(sizeof(long long), iAlignment);
case DATA_TYPE_ULONG_LONG: return Align(sizeof(unsigned long long), iAlignment);
case DATA_TYPE_FLOAT: return Align(sizeof(float), iAlignment);
case DATA_TYPE_DOUBLE: return Align(sizeof(double), iAlignment);
case DATA_TYPE_POINTER: return Align(sizeof(void *), iAlignment);
case DATA_TYPE_STRING: return Align(sizeof(char *), iAlignment);
case DATA_TYPE_OBJECT: return type.size;
default: puts("Unknown data type.");
}
return 0;
}
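// For example, with the default 4-byte alignment:
//   DATA_TYPE_CHAR   -> Align(sizeof(char), 4)   == 4
//   DATA_TYPE_DOUBLE -> Align(sizeof(double), 4) == 8
//   DATA_TYPE_OBJECT -> whatever size was stored in the DataTypeSized_t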
// ============================================================================
// >> CLASSES
// ============================================================================
/*
This is the base class for every calling convention. Inherit from this class
to create your own calling convention.
*/
class ICallingConvention
{
public:
/*
Initializes the calling convention.
@param <vecArgTypes>:
A list of DataType_t objects, which define the arguments of the function.
@param <returnType>:
The return type of the function.
*/
ICallingConvention(std::vector<DataTypeSized_t> vecArgTypes, DataTypeSized_t returnType, int iAlignment=4)
{
m_vecArgTypes = vecArgTypes;
std::vector<DataTypeSized_t>::iterator it = m_vecArgTypes.begin();
for (; it != m_vecArgTypes.end(); it++)
{
DataTypeSized_t &type = *it;
if (!type.size)
type.size = GetDataTypeSize(type);
}
m_returnType = returnType;
if (!m_returnType.size)
m_returnType.size = GetDataTypeSize(m_returnType);
m_iAlignment = iAlignment;
}
/*
This should return a list of Register_t values. These registers will be
saved for later access.
*/
virtual std::list<Register_t> GetRegisters() = 0;
/*
Returns the number of bytes that should be added to the stack to clean up.
*/
virtual int GetPopSize() = 0;
virtual int GetArgStackSize() = 0;
virtual void** GetStackArgumentPtr(CRegisters* pRegisters) = 0;
/*
Returns a pointer to the argument at the given index.
@param <iIndex>:
The index of the argument.
@param <pRegisters>:
A snapshot of all saved registers.
*/
virtual void* GetArgumentPtr(int iIndex, CRegisters* pRegisters) = 0;
/*
*/
virtual void ArgumentPtrChanged(int iIndex, CRegisters* pRegisters, void* pArgumentPtr) = 0;
/*
Returns a pointer to the return value.
@param <pRegisters>:
A snapshot of all saved registers.
*/
virtual void* GetReturnPtr(CRegisters* pRegisters) = 0;
/*
*/
virtual void ReturnPtrChanged(CRegisters* pRegisters, void* pReturnPtr) = 0;
public:
std::vector<DataTypeSized_t> m_vecArgTypes;
DataTypeSized_t m_returnType;
int m_iAlignment;
};
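/*
	Illustrative sketch only: describing "int func(int, double)" for one of
	the conventions implemented below (e.g. x86MsCdecl, added in this commit)
	could look like this:

		std::vector<DataTypeSized_t> vecArgs(2);
		vecArgs[0].type = DATA_TYPE_INT;
		vecArgs[1].type = DATA_TYPE_DOUBLE;

		DataTypeSized_t retType;
		retType.type = DATA_TYPE_INT;

		ICallingConvention* pConv = new x86MsCdecl(vecArgs, retType);

	Sizes left at 0 are filled in by the ICallingConvention constructor via
	GetDataTypeSize().
*/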
#endif // _CONVENTION_H

46
DynamicHooks/conventions/x86GccCdecl.h Normal file
View File

@@ -0,0 +1,46 @@
/**
* =============================================================================
* DynamicHooks
* Copyright (C) 2015 Robin Gohmert. All rights reserved.
* =============================================================================
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
* asm.h/cpp from devmaster.net (thanks cybermind) edited by pRED* to handle gcc
* -fPIC thunks correctly
*
* Idea and trampoline code taken from DynDetours (thanks your-name-here).
*/
#ifndef _X86_GCC_CDECL_H
#define _X86_GCC_CDECL_H
// ============================================================================
// >> INCLUDES
// ============================================================================
#include "x86MsCdecl.h"
// ============================================================================
// >> CLASSES
// ============================================================================
typedef x86MsCdecl x86GccCdecl;
#endif // _X86_GCC_CDECL_H

46
DynamicHooks/conventions/x86GccThiscall.h Normal file
View File

@@ -0,0 +1,46 @@
/**
* =============================================================================
* DynamicHooks
* Copyright (C) 2015 Robin Gohmert. All rights reserved.
* =============================================================================
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
* asm.h/cpp from devmaster.net (thanks cybermind) edited by pRED* to handle gcc
* -fPIC thunks correctly
*
* Idea and trampoline code taken from DynDetours (thanks your-name-here).
*/
#ifndef _X86_GCC_THISCALL_H
#define _X86_GCC_THISCALL_H
// ============================================================================
// >> INCLUDES
// ============================================================================
#include "x86GccCdecl.h"
// ============================================================================
// >> CLASSES
// ============================================================================
typedef x86GccCdecl x86GccThiscall;
#endif // _X86_GCC_THISCALL_H

145
DynamicHooks/conventions/x86MsCdecl.cpp Normal file
View File

@@ -0,0 +1,145 @@
/**
* =============================================================================
* DynamicHooks
* Copyright (C) 2015 Robin Gohmert. All rights reserved.
* =============================================================================
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
* asm.h/cpp from devmaster.net (thanks cybermind) edited by pRED* to handle gcc
* -fPIC thunks correctly
*
* Idea and trampoline code taken from DynDetours (thanks your-name-here).
*/
// ============================================================================
// >> INCLUDES
// ============================================================================
#include "x86MsCdecl.h"
#include <string.h>
// ============================================================================
// >> x86MsCdecl
// ============================================================================
x86MsCdecl::x86MsCdecl(std::vector<DataTypeSized_t> vecArgTypes, DataTypeSized_t returnType, int iAlignment) :
ICallingConvention(vecArgTypes, returnType, iAlignment)
{
if (m_returnType.size > 4)
{
m_pReturnBuffer = malloc(m_returnType.size);
}
else
{
m_pReturnBuffer = NULL;
}
}
x86MsCdecl::~x86MsCdecl()
{
if (m_pReturnBuffer)
{
free(m_pReturnBuffer);
}
}
std::list<Register_t> x86MsCdecl::GetRegisters()
{
std::list<Register_t> registers;
registers.push_back(ESP);
if (m_returnType.type == DATA_TYPE_FLOAT || m_returnType.type == DATA_TYPE_DOUBLE)
{
registers.push_back(ST0);
}
else
{
registers.push_back(EAX);
if (m_pReturnBuffer)
{
registers.push_back(EDX);
}
}
return registers;
}
int x86MsCdecl::GetPopSize()
{
return 0;
}
int x86MsCdecl::GetArgStackSize()
{
int iArgStackSize = 0;
for (unsigned int i = 0; i < m_vecArgTypes.size(); i++)
{
iArgStackSize += m_vecArgTypes[i].size;
}
return iArgStackSize;
}
void** x86MsCdecl::GetStackArgumentPtr(CRegisters* pRegisters)
{
return (void **)(pRegisters->m_esp->GetValue<unsigned long>() + 4);
}
void* x86MsCdecl::GetArgumentPtr(int iIndex, CRegisters* pRegisters)
{
int iOffset = 4;
for(int i=0; i < iIndex; i++)
{
iOffset += m_vecArgTypes[i].size;
}
return (void *) (pRegisters->m_esp->GetValue<unsigned long>() + iOffset);
}
void x86MsCdecl::ArgumentPtrChanged(int iIndex, CRegisters* pRegisters, void* pArgumentPtr)
{
}
void* x86MsCdecl::GetReturnPtr(CRegisters* pRegisters)
{
if (m_returnType.type == DATA_TYPE_FLOAT || m_returnType.type == DATA_TYPE_DOUBLE)
return pRegisters->m_st0->m_pAddress;
if (m_pReturnBuffer)
{
// First half in eax, second half in edx
memcpy(m_pReturnBuffer, pRegisters->m_eax, 4);
memcpy((void *) ((unsigned long) m_pReturnBuffer + 4), pRegisters->m_edx, 4);
return m_pReturnBuffer;
}
return pRegisters->m_eax->m_pAddress;
}
void x86MsCdecl::ReturnPtrChanged(CRegisters* pRegisters, void* pReturnPtr)
{
if (m_pReturnBuffer)
{
// First half in eax, second half in edx
memcpy(pRegisters->m_eax, m_pReturnBuffer, 4);
memcpy(pRegisters->m_edx, (void *) ((unsigned long) m_pReturnBuffer + 4), 4);
}
}

84
DynamicHooks/conventions/x86MsCdecl.h Normal file
View File

@@ -0,0 +1,84 @@
/**
* =============================================================================
* DynamicHooks
* Copyright (C) 2015 Robin Gohmert. All rights reserved.
* =============================================================================
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
* asm.h/cpp from devmaster.net (thanks cybermind) edited by pRED* to handle gcc
* -fPIC thunks correctly
*
* Idea and trampoline code taken from DynDetours (thanks your-name-here).
*/
#ifndef _X86_MS_CDECL_H
#define _X86_MS_CDECL_H
// ============================================================================
// >> INCLUDES
// ============================================================================
#include "../convention.h"
// ============================================================================
// >> CLASSES
// ============================================================================
/*
Source: DynCall manual and Windows docs
Registers:
- eax = return value
- edx = return value
- esp = stack pointer
- st0 = floating point return value
Parameter passing:
- stack parameter order: right-to-left
- caller cleans up the stack
- all arguments are pushed onto the stack
- alignment: 4 bytes
Return values:
- return values of pointer or integral type (<= 32 bits) are returned via the eax register
- integers > 32 bits are returned via the eax and edx registers
- floating point types are returned via the st0 register
*/
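/*
	Example (illustrative): for "int func(int a, int b)" at function entry
	[esp] holds the return address, [esp+4] holds a and [esp+8] holds b; the
	result comes back in eax and the *caller* removes the 8 bytes of
	arguments after the call. Accordingly GetArgumentPtr(0) below returns
	esp+4 and GetPopSize() returns 0.
*/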
class x86MsCdecl: public ICallingConvention
{
public:
x86MsCdecl(std::vector<DataTypeSized_t> vecArgTypes, DataTypeSized_t returnType, int iAlignment=4);
~x86MsCdecl();
virtual std::list<Register_t> GetRegisters();
virtual int GetPopSize();
virtual int GetArgStackSize();
virtual void** GetStackArgumentPtr(CRegisters* pRegisters);
virtual void* GetArgumentPtr(int iIndex, CRegisters* pRegisters);
virtual void ArgumentPtrChanged(int iIndex, CRegisters* pRegisters, void* pArgumentPtr);
virtual void* GetReturnPtr(CRegisters* pRegisters);
virtual void ReturnPtrChanged(CRegisters* pRegisters, void* pReturnPtr);
private:
void* m_pReturnBuffer;
};
#endif // _X86_MS_CDECL_H

152
DynamicHooks/conventions/x86MsStdcall.cpp Normal file
View File

@@ -0,0 +1,152 @@
/**
* =============================================================================
* DynamicHooks
* Copyright (C) 2015 Robin Gohmert. All rights reserved.
* =============================================================================
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
* asm.h/cpp from devmaster.net (thanks cybermind) edited by pRED* to handle gcc
* -fPIC thunks correctly
*
* Idea and trampoline code taken from DynDetours (thanks your-name-here).
*/
// ============================================================================
// >> INCLUDES
// ============================================================================
#include "x86MsStdcall.h"
#include <string.h>
// ============================================================================
// >> x86MsStdcall
// ============================================================================
x86MsStdcall::x86MsStdcall(std::vector<DataTypeSized_t> vecArgTypes, DataTypeSized_t returnType, int iAlignment) :
ICallingConvention(vecArgTypes, returnType, iAlignment)
{
if (m_returnType.size > 4)
{
m_pReturnBuffer = malloc(m_returnType.size);
}
else
{
m_pReturnBuffer = NULL;
}
}
x86MsStdcall::~x86MsStdcall()
{
if (m_pReturnBuffer)
{
free(m_pReturnBuffer);
}
}
std::list<Register_t> x86MsStdcall::GetRegisters()
{
std::list<Register_t> registers;
registers.push_back(ESP);
if (m_returnType.type == DATA_TYPE_FLOAT || m_returnType.type == DATA_TYPE_DOUBLE)
{
registers.push_back(ST0);
}
else
{
registers.push_back(EAX);
if (m_pReturnBuffer)
{
registers.push_back(EDX);
}
}
return registers;
}
int x86MsStdcall::GetPopSize()
{
int iPopSize = 0;
for(unsigned int i=0; i < m_vecArgTypes.size(); i++)
{
iPopSize += m_vecArgTypes[i].size;
}
return iPopSize;
}
int x86MsStdcall::GetArgStackSize()
{
int iArgStackSize = 0;
for (unsigned int i = 0; i < m_vecArgTypes.size(); i++)
{
iArgStackSize += m_vecArgTypes[i].size;
}
return iArgStackSize;
}
void** x86MsStdcall::GetStackArgumentPtr(CRegisters* pRegisters)
{
return (void **)(pRegisters->m_esp->GetValue<unsigned long>() + 4);
}
void* x86MsStdcall::GetArgumentPtr(int iIndex, CRegisters* pRegisters)
{
int iOffset = 4;
for(int i=0; i < iIndex; i++)
{
iOffset += m_vecArgTypes[i].size;
}
return (void *) (pRegisters->m_esp->GetValue<unsigned long>() + iOffset);
}
void x86MsStdcall::ArgumentPtrChanged(int iIndex, CRegisters* pRegisters, void* pArgumentPtr)
{
}
void* x86MsStdcall::GetReturnPtr(CRegisters* pRegisters)
{
if (m_returnType.type == DATA_TYPE_FLOAT || m_returnType.type == DATA_TYPE_DOUBLE)
return pRegisters->m_st0->m_pAddress;
if (m_pReturnBuffer)
{
// First half in eax, second half in edx
memcpy(m_pReturnBuffer, pRegisters->m_eax, 4);
memcpy((void *) ((unsigned long) m_pReturnBuffer + 4), pRegisters->m_edx, 4);
return m_pReturnBuffer;
}
return pRegisters->m_eax->m_pAddress;
}
void x86MsStdcall::ReturnPtrChanged(CRegisters* pRegisters, void* pReturnPtr)
{
if (m_pReturnBuffer)
{
// First half in eax, second half in edx
memcpy(pRegisters->m_eax, m_pReturnBuffer, 4);
memcpy(pRegisters->m_edx, (void *) ((unsigned long) m_pReturnBuffer + 4), 4);
}
}

84
DynamicHooks/conventions/x86MsStdcall.h Normal file
View File

@@ -0,0 +1,84 @@
/**
* =============================================================================
* DynamicHooks
* Copyright (C) 2015 Robin Gohmert. All rights reserved.
* =============================================================================
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
* asm.h/cpp from devmaster.net (thanks cybermind) edited by pRED* to handle gcc
* -fPIC thunks correctly
*
* Idea and trampoline code taken from DynDetours (thanks your-name-here).
*/
#ifndef _X86_MS_STDCALL_H
#define _X86_MS_STDCALL_H
// ============================================================================
// >> INCLUDES
// ============================================================================
#include "../convention.h"
// ============================================================================
// >> CLASSES
// ============================================================================
/*
Source: DynCall manual and Windows docs
Registers:
- eax = return value
- edx = return value
- esp = stack pointer
- st0 = floating point return value
Parameter passing:
- stack parameter order: right-to-left
- callee cleans up the stack
- all arguments are pushed onto the stack
- alignment: 4 bytes
Return values:
- return values of pointer or integral type (<= 32 bits) are returned via the eax register
- integers > 32 bits are returned via the eax and edx registers
- floating point types are returned via the st0 register
*/
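/*
	Example (illustrative): for "int __stdcall func(int a, int b)" the stack
	layout at entry matches cdecl ([esp+4] = a, [esp+8] = b), but the callee
	returns with "ret 8". Accordingly GetPopSize() below sums the argument
	sizes (8 here) instead of returning 0.
*/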
class x86MsStdcall: public ICallingConvention
{
public:
x86MsStdcall(std::vector<DataTypeSized_t> vecArgTypes, DataTypeSized_t returnType, int iAlignment=4);
~x86MsStdcall();
virtual std::list<Register_t> GetRegisters();
virtual int GetPopSize();
virtual int GetArgStackSize();
virtual void** GetStackArgumentPtr(CRegisters* pRegisters);
virtual void* GetArgumentPtr(int iIndex, CRegisters* pRegisters);
virtual void ArgumentPtrChanged(int iIndex, CRegisters* pRegisters, void* pArgumentPtr);
virtual void* GetReturnPtr(CRegisters* pRegisters);
virtual void ReturnPtrChanged(CRegisters* pRegisters, void* pReturnPtr);
private:
void* m_pReturnBuffer;
};
#endif // _X86_MS_STDCALL_H

161
DynamicHooks/conventions/x86MsThiscall.cpp Normal file
View File

@@ -0,0 +1,161 @@
/**
* =============================================================================
* DynamicHooks
* Copyright (C) 2015 Robin Gohmert. All rights reserved.
* =============================================================================
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
* asm.h/cpp from devmaster.net (thanks cybermind) edited by pRED* to handle gcc
* -fPIC thunks correctly
*
* Idea and trampoline code taken from DynDetours (thanks your-name-here).
*/
// ============================================================================
// >> INCLUDES
// ============================================================================
#include "x86MsThiscall.h"
#include <string.h>
// ============================================================================
// >> x86MsThiscall
// ============================================================================
x86MsThiscall::x86MsThiscall(std::vector<DataTypeSized_t> vecArgTypes, DataTypeSized_t returnType, int iAlignment) :
ICallingConvention(vecArgTypes, returnType, iAlignment)
{
if (m_returnType.size > 4)
{
m_pReturnBuffer = malloc(m_returnType.size);
}
else
{
m_pReturnBuffer = NULL;
}
}
x86MsThiscall::~x86MsThiscall()
{
if (m_pReturnBuffer)
{
free(m_pReturnBuffer);
}
}
std::list<Register_t> x86MsThiscall::GetRegisters()
{
std::list<Register_t> registers;
registers.push_back(ESP);
registers.push_back(ECX);
if (m_returnType.type == DATA_TYPE_FLOAT || m_returnType.type == DATA_TYPE_DOUBLE)
{
registers.push_back(ST0);
}
else
{
registers.push_back(EAX);
if (m_pReturnBuffer)
{
registers.push_back(EDX);
}
}
return registers;
}
int x86MsThiscall::GetPopSize()
{
// This pointer.
// FIXME LINUX
//int iPopSize = GetDataTypeSize(DATA_TYPE_POINTER, m_iAlignment);
int iPopSize = 0;
for(unsigned int i=0; i < m_vecArgTypes.size(); i++)
{
iPopSize += m_vecArgTypes[i].size;
}
return iPopSize;
}
int x86MsThiscall::GetArgStackSize()
{
int iArgStackSize = 0;
for (unsigned int i = 0; i < m_vecArgTypes.size(); i++)
{
iArgStackSize += m_vecArgTypes[i].size;
}
return iArgStackSize;
}
void** x86MsThiscall::GetStackArgumentPtr(CRegisters* pRegisters)
{
return (void **)(pRegisters->m_esp->GetValue<unsigned long>() + 4);
}
void* x86MsThiscall::GetArgumentPtr(int iIndex, CRegisters* pRegisters)
{
if (iIndex == 0)
{
return pRegisters->m_ecx->m_pAddress;
}
int iOffset = 4;
for(int i=0; i < iIndex-1; i++)
{
iOffset += m_vecArgTypes[i].size;
}
return (void *) (pRegisters->m_esp->GetValue<unsigned long>() + iOffset);
}
void x86MsThiscall::ArgumentPtrChanged(int iIndex, CRegisters* pRegisters, void* pArgumentPtr)
{
}
void* x86MsThiscall::GetReturnPtr(CRegisters* pRegisters)
{
if (m_returnType.type == DATA_TYPE_FLOAT || m_returnType.type == DATA_TYPE_DOUBLE)
return pRegisters->m_st0->m_pAddress;
if (m_pReturnBuffer)
{
// First half in eax, second half in edx
memcpy(m_pReturnBuffer, pRegisters->m_eax, 4);
memcpy((void *) ((unsigned long) m_pReturnBuffer + 4), pRegisters->m_edx, 4);
return m_pReturnBuffer;
}
return pRegisters->m_eax->m_pAddress;
}
void x86MsThiscall::ReturnPtrChanged(CRegisters* pRegisters, void* pReturnPtr)
{
if (m_pReturnBuffer)
{
// First half in eax, second half in edx
memcpy(pRegisters->m_eax, m_pReturnBuffer, 4);
memcpy(pRegisters->m_edx, (void *) ((unsigned long) m_pReturnBuffer + 4), 4);
}
}

85
DynamicHooks/conventions/x86MsThiscall.h Normal file
View File

@@ -0,0 +1,85 @@
/**
* =============================================================================
* DynamicHooks
* Copyright (C) 2015 Robin Gohmert. All rights reserved.
* =============================================================================
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
* asm.h/cpp from devmaster.net (thanks cybermind) edited by pRED* to handle gcc
* -fPIC thunks correctly
*
* Idea and trampoline code taken from DynDetours (thanks your-name-here).
*/
#ifndef _X86_MS_THISCALL_H
#define _X86_MS_THISCALL_H
// ============================================================================
// >> INCLUDES
// ============================================================================
#include "../convention.h"
// ============================================================================
// >> CLASSES
// ============================================================================
/*
Source: DynCall manual and Windows docs
Registers:
- eax = return value
- ecx = this pointer
- edx = return value
- esp = stack pointer
- st0 = floating point return value
Parameter passing:
- stack parameter order: right-to-left
- callee cleans up the stack
- all other arguments are pushed onto the stack
- alignment: 4 bytes
Return values:
- return values of pointer or integral type (<= 32 bits) are returned via the eax register
- integers > 32 bits are returned via the eax and edx registers
- floating point types are returned via the st0 register
*/
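/*
	Example (illustrative): for a member function "int CFoo::Bar(int a)"
	compiled with MSVC, the this pointer travels in ecx and a sits at
	[esp+4] on entry; the callee returns with "ret 4". Accordingly
	GetArgumentPtr(0) below returns the saved ecx storage and
	GetArgumentPtr(1) returns esp+4.
*/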
class x86MsThiscall: public ICallingConvention
{
public:
x86MsThiscall(std::vector<DataTypeSized_t> vecArgTypes, DataTypeSized_t returnType, int iAlignment=4);
~x86MsThiscall();
virtual std::list<Register_t> GetRegisters();
virtual int GetPopSize();
virtual int GetArgStackSize();
virtual void** GetStackArgumentPtr(CRegisters* pRegisters);
virtual void* GetArgumentPtr(int iIndex, CRegisters* pRegisters);
virtual void ArgumentPtrChanged(int iIndex, CRegisters* pRegisters, void* pArgumentPtr);
virtual void* GetReturnPtr(CRegisters* pRegisters);
virtual void ReturnPtrChanged(CRegisters* pRegisters, void* pReturnPtr);
private:
void* m_pReturnBuffer;
};
#endif // _X86_MS_THISCALL_H

634
DynamicHooks/hook.cpp Normal file
View File

@@ -0,0 +1,634 @@
/**
* =============================================================================
* DynamicHooks
* Copyright (C) 2015 Robin Gohmert. All rights reserved.
* =============================================================================
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
* asm.h/cpp from devmaster.net (thanks cybermind) edited by pRED* to handle gcc
* -fPIC thunks correctly
*
* Idea and trampoline code taken from DynDetours (thanks your-name-here).
*/
// ============================================================================
// >> INCLUDES
// ============================================================================
#include "hook.h"
#include "utilities.h"
#include "asm.h"
using namespace asmjit;
using namespace asmjit::x86;
// ============================================================================
// >> DEFINITIONS
// ============================================================================
#define JMP_SIZE 6
// ============================================================================
// >> CHook
// ============================================================================
CHook::CHook(void* pFunc, ICallingConvention* pConvention)
{
m_pFunc = pFunc;
m_pRegisters = new CRegisters(pConvention->GetRegisters());
m_pCallingConvention = pConvention;
unsigned char* pTarget = (unsigned char *) pFunc;
// Determine the number of bytes we need to copy
int iBytesToCopy = copy_bytes(pTarget, NULL, JMP_SIZE);
// Create an array for the bytes to copy + a jump to the rest of the
// function.
unsigned char* pCopiedBytes = new unsigned char[iBytesToCopy + JMP_SIZE];
// Fill the array with NOP instructions
memset(pCopiedBytes, 0x90, iBytesToCopy + JMP_SIZE);
// Copy the required bytes to our array
SetMemPatchable(pCopiedBytes, iBytesToCopy + JMP_SIZE);
copy_bytes(pTarget, pCopiedBytes, JMP_SIZE);
// After the copied bytes, write a jump back into the original function (target + number of copied bytes)
WriteJMP(pCopiedBytes + iBytesToCopy, pTarget + iBytesToCopy);
// Save the trampoline
m_pTrampoline = (void *) pCopiedBytes;
// Create the bridge function
m_pBridge = CreateBridge();
// Write a jump to the bridge
WriteJMP((unsigned char *) pFunc, m_pBridge);
}
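/*
 * Illustrative usage sketch (pTargetFunc and MyPreHandler are hypothetical;
 * in this extension the hooks are driven from the SourcePawn natives rather
 * than set up by hand):
 *
 *   bool MyPreHandler(HookType_t eHookType, CHook* pHook)
 *   {
 *       return false; // return true to supercede the original function
 *   }
 *
 *   std::vector<DataTypeSized_t> vecArgs(1);
 *   vecArgs[0].type = DATA_TYPE_INT;
 *   DataTypeSized_t retType;
 *   retType.type = DATA_TYPE_INT;
 *
 *   CHook* pHook = new CHook(pTargetFunc, new x86MsCdecl(vecArgs, retType));
 *   pHook->AddCallback(HOOKTYPE_PRE, MyPreHandler);
 */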
CHook::~CHook()
{
// Copy back the previously copied bytes
copy_bytes((unsigned char *) m_pTrampoline, (unsigned char *) m_pFunc, JMP_SIZE);
// Free the trampoline array
delete[] (unsigned char *) m_pTrampoline;
// Free the asm bridge and new return address
m_Runtime.release(m_pBridge);
m_Runtime.release(m_pNewRetAddr);
delete m_pRegisters;
delete m_pCallingConvention;
}
void CHook::AddCallback(HookType_t eHookType, HookHandlerFn* pCallback)
{
if (!pCallback)
return;
if (!IsCallbackRegistered(eHookType, pCallback))
m_hookHandler[eHookType].push_back(pCallback);
}
void CHook::RemoveCallback(HookType_t eHookType, HookHandlerFn* pCallback)
{
if (IsCallbackRegistered(eHookType, pCallback))
m_hookHandler[eHookType].remove(pCallback);
}
bool CHook::IsCallbackRegistered(HookType_t eHookType, HookHandlerFn* pCallback)
{
std::list<HookHandlerFn *> callbacks = m_hookHandler[eHookType];
for(std::list<HookHandlerFn *>::iterator it=callbacks.begin(); it != callbacks.end(); it++)
{
if (*it == pCallback)
return true;
}
return false;
}
bool CHook::AreCallbacksRegistered()
{
return !m_hookHandler[HOOKTYPE_PRE].empty() || !m_hookHandler[HOOKTYPE_POST].empty();
}
bool CHook::HookHandler(HookType_t eHookType)
{
bool bOverride = false;
std::list<HookHandlerFn *> callbacks = this->m_hookHandler[eHookType];
for(std::list<HookHandlerFn *>::iterator it=callbacks.begin(); it != callbacks.end(); it++)
{
bool result = ((HookHandlerFn) *it)(eHookType, this);
if (result)
bOverride = true;
}
return bOverride;
}
void* __cdecl CHook::GetReturnAddress(void* pESP)
{
if (m_RetAddr.count(pESP) == 0)
puts("ESP not present.");
return m_RetAddr[pESP];
}
void __cdecl CHook::SetReturnAddress(void* pRetAddr, void* pESP)
{
m_RetAddr[pESP] = pRetAddr;
}
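/*
 * Rough control-flow sketch of the generated code below:
 *
 *   caller -> jmp written over m_pFunc -> bridge (CreateBridge):
 *     replace the return address on the stack with the post-hook callback,
 *     save registers and run the pre-hook handlers, restore registers, then
 *     either "ret" (a pre-hook returned true: supercede the original) or
 *     jmp to the trampoline (relocated bytes + jmp back into the original).
 *
 *   original function returns -> post-hook callback (CreatePostCallback):
 *     rewind esp so the arguments are visible again, save registers and run
 *     the post-hook handlers, restore registers, then jmp to the original
 *     return address stored in m_RetAddr.
 */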
void* CHook::CreateBridge()
{
X86Assembler a(&m_Runtime);
Label label_supercede = a.newLabel();
// Write a redirect to the post-hook code
Write_ModifyReturnAddress(a);
// Call the pre-hook handler and jump to label_supercede if true was returned
Write_CallHandler(a, HOOKTYPE_PRE);
a.cmp(eax.r8(), true);
// Restore the previously saved registers, so any changes will be applied
Write_RestoreRegisters(a);
a.je(label_supercede);
// Jump to the trampoline
a.jmp(Ptr(m_pTrampoline));
// This code will be executed if a pre-hook returns true
a.bind(label_supercede);
// Finally, return to the caller
// This will still call post hooks, but will skip the original function.
a.ret(imm(m_pCallingConvention->GetPopSize()));
return a.make();
}
void CHook::Write_ModifyReturnAddress(X86Assembler& a)
{
// Save scratch registers that are used by SetReturnAddress
static void* pEAX = NULL;
static void* pECX = NULL;
static void* pEDX = NULL;
a.mov(dword_ptr_abs(Ptr(&pEAX)), eax);
a.mov(dword_ptr_abs(Ptr(&pECX)), ecx);
a.mov(dword_ptr_abs(Ptr(&pEDX)), edx);
// Store the return address in eax
a.mov(eax, dword_ptr(esp));
// Save the original return address by using the current esp as the key.
// This should be unique until we have returned to the original caller.
void (__cdecl CHook::*SetReturnAddress)(void*, void*) = &CHook::SetReturnAddress;
a.push(esp);
a.push(eax);
a.push(imm_ptr(this));
a.call(imm_ptr((void *&)SetReturnAddress));
a.add(esp, 12);
// Restore scratch registers
a.mov(eax, dword_ptr_abs(Ptr(&pEAX)));
a.mov(ecx, dword_ptr_abs(Ptr(&pECX)));
a.mov(edx, dword_ptr_abs(Ptr(&pEDX)));
// Override the return address. This is a redirect to our post-hook code
m_pNewRetAddr = CreatePostCallback();
a.mov(dword_ptr(esp), imm_ptr(m_pNewRetAddr));
}
void* CHook::CreatePostCallback()
{
X86Assembler a(&m_Runtime);
int iPopSize = m_pCallingConvention->GetPopSize();
// Subtract the previously added bytes (stack size + return address), so
// that we can access the arguments again
a.sub(esp, imm(iPopSize+4));
// Call the post-hook handler
Write_CallHandler(a, HOOKTYPE_POST);
// Restore the previously saved registers, so any changes will be applied
Write_RestoreRegisters(a);
// Save scratch registers that are used by GetReturnAddress
static void* pEAX = NULL;
static void* pECX = NULL;
static void* pEDX = NULL;
a.mov(dword_ptr_abs(Ptr(&pEAX)), eax);
a.mov(dword_ptr_abs(Ptr(&pECX)), ecx);
a.mov(dword_ptr_abs(Ptr(&pEDX)), edx);
// Get the original return address
void* (__cdecl CHook::*GetReturnAddress)(void*) = &CHook::GetReturnAddress;
a.push(esp);
a.push(imm_ptr(this));
a.call(imm_ptr((void *&)GetReturnAddress));
a.add(esp, 8);
// Save the original return address
static void* pRetAddr = NULL;
a.mov(dword_ptr_abs(Ptr(&pRetAddr)), eax);
// Restore scratch registers
a.mov(eax, dword_ptr_abs(Ptr(&pEAX)));
a.mov(ecx, dword_ptr_abs(Ptr(&pECX)));
a.mov(edx, dword_ptr_abs(Ptr(&pEDX)));
// Add the bytes again to the stack (stack size + return address), so we
// don't corrupt the stack.
a.add(esp, imm(iPopSize+4));
// Jump to the original return address
a.jmp(dword_ptr_abs(Ptr(&pRetAddr)));
// Generate the code
return a.make();
}
void CHook::Write_CallHandler(X86Assembler& a, HookType_t type)
{
bool (__cdecl CHook::*HookHandler)(HookType_t) = &CHook::HookHandler;
// Save the registers so that we can access them in our handlers
Write_SaveRegisters(a);
// Call the global hook handler
a.push(type);
a.push(imm_ptr(this));
a.call(imm_ptr((void *&)HookHandler));
a.add(esp, 8);
}
void CHook::Write_SaveRegisters(X86Assembler& a)
{
std::list<Register_t> vecRegistersToSave = m_pCallingConvention->GetRegisters();
for(std::list<Register_t>::iterator it=vecRegistersToSave.begin(); it != vecRegistersToSave.end(); it++)
{
switch(*it)
{
// ========================================================================
// >> 8-bit General purpose registers
// ========================================================================
case AL: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_al->m_pAddress)), al); break;
case CL: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_cl->m_pAddress)), cl); break;
case DL: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_dl->m_pAddress)), dl); break;
case BL: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_bl->m_pAddress)), bl); break;
#if defined(ASMJIT_X64)
// 64-bit mode only
case SPL: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_spl->m_pAddress)), spl); break;
case BPL: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_bpl->m_pAddress)), bpl); break;
case SIL: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_sil->m_pAddress)), sil); break;
case DIL: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_dil->m_pAddress)), dil); break;
case R8B: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_r8b->m_pAddress)), r8b); break;
case R9B: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_r9b->m_pAddress)), r9b); break;
case R10B: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_r10b->m_pAddress)), r10b); break;
case R11B: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_r11b->m_pAddress)), r11b); break;
case R12B: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_r12b->m_pAddress)), r12b); break;
case R13B: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_r13b->m_pAddress)), r13b); break;
case R14B: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_r14b->m_pAddress)), r14b); break;
case R15B: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_r15b->m_pAddress)), r15b); break;
#endif // ASMJIT_X64
case AH: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_ah->m_pAddress)), ah); break;
case CH: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_ch->m_pAddress)), ch); break;
case DH: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_dh->m_pAddress)), dh); break;
case BH: a.mov(byte_ptr_abs(Ptr(m_pRegisters->m_bh->m_pAddress)), bh); break;
// ========================================================================
// >> 16-bit General purpose registers
// ========================================================================
case AX: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_ax->m_pAddress)), ax); break;
case CX: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_cx->m_pAddress)), cx); break;
case DX: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_dx->m_pAddress)), dx); break;
case BX: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_bx->m_pAddress)), bx); break;
case SP: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_sp->m_pAddress)), x86::sp); break;
case BP: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_bp->m_pAddress)), bp); break;
case SI: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_si->m_pAddress)), si); break;
case DI: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_di->m_pAddress)), di); break;
#if defined(ASMJIT_X64)
// 64-bit mode only
case R8W: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_r8w->m_pAddress)), r8w); break;
case R9W: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_r9w->m_pAddress)), r9w); break;
case R10W: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_r10w->m_pAddress)), r10w); break;
case R11W: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_r11w->m_pAddress)), r11w); break;
case R12W: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_r12w->m_pAddress)), r12w); break;
case R13W: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_r13w->m_pAddress)), r13w); break;
case R14W: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_r14w->m_pAddress)), r14w); break;
case R15W: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_r15w->m_pAddress)), r15w); break;
#endif // ASMJIT_X64
// ========================================================================
// >> 32-bit General purpose registers
// ========================================================================
case EAX: a.mov(dword_ptr_abs(Ptr(m_pRegisters->m_eax->m_pAddress)), eax); break;
case ECX: a.mov(dword_ptr_abs(Ptr(m_pRegisters->m_ecx->m_pAddress)), ecx); break;
case EDX: a.mov(dword_ptr_abs(Ptr(m_pRegisters->m_edx->m_pAddress)), edx); break;
case EBX: a.mov(dword_ptr_abs(Ptr(m_pRegisters->m_ebx->m_pAddress)), ebx); break;
case ESP: a.mov(dword_ptr_abs(Ptr(m_pRegisters->m_esp->m_pAddress)), esp); break;
case EBP: a.mov(dword_ptr_abs(Ptr(m_pRegisters->m_ebp->m_pAddress)), ebp); break;
case ESI: a.mov(dword_ptr_abs(Ptr(m_pRegisters->m_esi->m_pAddress)), esi); break;
case EDI: a.mov(dword_ptr_abs(Ptr(m_pRegisters->m_edi->m_pAddress)), edi); break;
#if defined(ASMJIT_X64)
// 64-bit mode only
case R8D: a.mov(dword_ptr_abs(Ptr(m_pRegisters->m_r8d->m_pAddress)), r8d); break;
case R9D: a.mov(dword_ptr_abs(Ptr(m_pRegisters->m_r9d->m_pAddress)), r9d); break;
case R10D: a.mov(dword_ptr_abs(Ptr(m_pRegisters->m_r10d->m_pAddress)), r10d); break;
case R11D: a.mov(dword_ptr_abs(Ptr(m_pRegisters->m_r11d->m_pAddress)), r11d); break;
case R12D: a.mov(dword_ptr_abs(Ptr(m_pRegisters->m_r12d->m_pAddress)), r12d); break;
case R13D: a.mov(dword_ptr_abs(Ptr(m_pRegisters->m_r13d->m_pAddress)), r13d); break;
case R14D: a.mov(dword_ptr_abs(Ptr(m_pRegisters->m_r14d->m_pAddress)), r14d); break;
case R15D: a.mov(dword_ptr_abs(Ptr(m_pRegisters->m_r15d->m_pAddress)), r15d); break;
#endif // ASMJIT_X64
// ========================================================================
// >> 64-bit General purpose registers
// ========================================================================
#if defined(ASMJIT_X64)
// 64-bit mode only
case RAX: a.mov(qword_ptr_abs(Ptr(m_pRegisters->m_rax->m_pAddress)), rax); break;
case RCX: a.mov(qword_ptr_abs(Ptr(m_pRegisters->m_rcx->m_pAddress)), rcx); break;
case RDX: a.mov(qword_ptr_abs(Ptr(m_pRegisters->m_rdx->m_pAddress)), rdx); break;
case RBX: a.mov(qword_ptr_abs(Ptr(m_pRegisters->m_rbx->m_pAddress)), rbx); break;
case RSP: a.mov(qword_ptr_abs(Ptr(m_pRegisters->m_rsp->m_pAddress)), rsp); break;
case RBP: a.mov(qword_ptr_abs(Ptr(m_pRegisters->m_rbp->m_pAddress)), rbp); break;
case RSI: a.mov(qword_ptr_abs(Ptr(m_pRegisters->m_rsi->m_pAddress)), rsi); break;
case RDI: a.mov(qword_ptr_abs(Ptr(m_pRegisters->m_rdi->m_pAddress)), rdi); break;
#endif // ASMJIT_X64
#if defined(ASMJIT_X64)
// 64-bit mode only
case R8: a.mov(qword_ptr_abs(Ptr(m_pRegisters->m_r8->m_pAddress)), r8); break;
case R9: a.mov(qword_ptr_abs(Ptr(m_pRegisters->m_r9->m_pAddress)), r9); break;
case R10: a.mov(qword_ptr_abs(Ptr(m_pRegisters->m_r10->m_pAddress)), r10); break;
case R11: a.mov(qword_ptr_abs(Ptr(m_pRegisters->m_r11->m_pAddress)), r11); break;
case R12: a.mov(qword_ptr_abs(Ptr(m_pRegisters->m_r12->m_pAddress)), r12); break;
case R13: a.mov(qword_ptr_abs(Ptr(m_pRegisters->m_r13->m_pAddress)), r13); break;
case R14: a.mov(qword_ptr_abs(Ptr(m_pRegisters->m_r14->m_pAddress)), r14); break;
case R15: a.mov(qword_ptr_abs(Ptr(m_pRegisters->m_r15->m_pAddress)), r15); break;
#endif // ASMJIT_X64
// ========================================================================
// >> 64-bit MM (MMX) registers
// ========================================================================
case MM0: a.movq(qword_ptr_abs(Ptr(m_pRegisters->m_mm0->m_pAddress)), mm0); break;
case MM1: a.movq(qword_ptr_abs(Ptr(m_pRegisters->m_mm1->m_pAddress)), mm1); break;
case MM2: a.movq(qword_ptr_abs(Ptr(m_pRegisters->m_mm2->m_pAddress)), mm2); break;
case MM3: a.movq(qword_ptr_abs(Ptr(m_pRegisters->m_mm3->m_pAddress)), mm3); break;
case MM4: a.movq(qword_ptr_abs(Ptr(m_pRegisters->m_mm4->m_pAddress)), mm4); break;
case MM5: a.movq(qword_ptr_abs(Ptr(m_pRegisters->m_mm5->m_pAddress)), mm5); break;
case MM6: a.movq(qword_ptr_abs(Ptr(m_pRegisters->m_mm6->m_pAddress)), mm6); break;
case MM7: a.movq(qword_ptr_abs(Ptr(m_pRegisters->m_mm7->m_pAddress)), mm7); break;
// ========================================================================
// >> 128-bit XMM registers
// ========================================================================
// TODO: Also provide movups?
case XMM0: a.movaps(oword_ptr_abs(Ptr(m_pRegisters->m_xmm0->m_pAddress)), xmm0); break;
case XMM1: a.movaps(oword_ptr_abs(Ptr(m_pRegisters->m_xmm1->m_pAddress)), xmm1); break;
case XMM2: a.movaps(oword_ptr_abs(Ptr(m_pRegisters->m_xmm2->m_pAddress)), xmm2); break;
case XMM3: a.movaps(oword_ptr_abs(Ptr(m_pRegisters->m_xmm3->m_pAddress)), xmm3); break;
case XMM4: a.movaps(oword_ptr_abs(Ptr(m_pRegisters->m_xmm4->m_pAddress)), xmm4); break;
case XMM5: a.movaps(oword_ptr_abs(Ptr(m_pRegisters->m_xmm5->m_pAddress)), xmm5); break;
case XMM6: a.movaps(oword_ptr_abs(Ptr(m_pRegisters->m_xmm6->m_pAddress)), xmm6); break;
case XMM7: a.movaps(oword_ptr_abs(Ptr(m_pRegisters->m_xmm7->m_pAddress)), xmm7); break;
#if defined(ASMJIT_X64)
// 64-bit mode only
case XMM8: a.movaps(oword_ptr_abs(Ptr(m_pRegisters->m_xmm8->m_pAddress)), xmm8); break;
case XMM9: a.movaps(oword_ptr_abs(Ptr(m_pRegisters->m_xmm9->m_pAddress)), xmm9); break;
case XMM10: a.movaps(oword_ptr_abs(Ptr(m_pRegisters->m_xmm10->m_pAddress)), xmm10); break;
case XMM11: a.movaps(oword_ptr_abs(Ptr(m_pRegisters->m_xmm11->m_pAddress)), xmm11); break;
case XMM12: a.movaps(oword_ptr_abs(Ptr(m_pRegisters->m_xmm12->m_pAddress)), xmm12); break;
case XMM13: a.movaps(oword_ptr_abs(Ptr(m_pRegisters->m_xmm13->m_pAddress)), xmm13); break;
case XMM14: a.movaps(oword_ptr_abs(Ptr(m_pRegisters->m_xmm14->m_pAddress)), xmm14); break;
case XMM15: a.movaps(oword_ptr_abs(Ptr(m_pRegisters->m_xmm15->m_pAddress)), xmm15); break;
#endif // ASMJIT_X64
// ========================================================================
// >> 16-bit Segment registers
// ========================================================================
case CS: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_cs->m_pAddress)), cs); break;
case SS: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_ss->m_pAddress)), ss); break;
case DS: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_ds->m_pAddress)), ds); break;
case ES: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_es->m_pAddress)), es); break;
case FS: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_fs->m_pAddress)), fs); break;
case GS: a.mov(word_ptr_abs(Ptr(m_pRegisters->m_gs->m_pAddress)), gs); break;
// ========================================================================
// >> 80-bit FPU registers
// ========================================================================
case ST0: a.fst(dword_ptr_abs(Ptr(m_pRegisters->m_st0->m_pAddress))); break;
//case ST1: a.mov(tword_ptr_abs(Ptr(m_pRegisters->m_st1->m_pAddress)), st1); break;
//case ST2: a.mov(tword_ptr_abs(Ptr(m_pRegisters->m_st2->m_pAddress)), st2); break;
//case ST3: a.mov(tword_ptr_abs(Ptr(m_pRegisters->m_st3->m_pAddress)), st3); break;
//case ST4: a.mov(tword_ptr_abs(Ptr(m_pRegisters->m_st4->m_pAddress)), st4); break;
//case ST5: a.mov(tword_ptr_abs(Ptr(m_pRegisters->m_st5->m_pAddress)), st5); break;
//case ST6: a.mov(tword_ptr_abs(Ptr(m_pRegisters->m_st6->m_pAddress)), st6); break;
//case ST7: a.mov(tword_ptr_abs(Ptr(m_pRegisters->m_st7->m_pAddress)), st7); break;
default: puts("Unsupported register.");
}
}
}
void CHook::Write_RestoreRegisters(X86Assembler& a)
{
std::list<Register_t> vecRegistersToSave = m_pCallingConvention->GetRegisters();
for(std::list<Register_t>::iterator it=vecRegistersToSave.begin(); it != vecRegistersToSave.end(); it++)
{
switch(*it)
{
// ========================================================================
// >> 8-bit General purpose registers
// ========================================================================
case AL: a.mov(al, byte_ptr_abs(Ptr(m_pRegisters->m_al->m_pAddress))); break;
case CL: a.mov(cl, byte_ptr_abs(Ptr(m_pRegisters->m_cl->m_pAddress))); break;
case DL: a.mov(dl, byte_ptr_abs(Ptr(m_pRegisters->m_dl->m_pAddress))); break;
case BL: a.mov(bl, byte_ptr_abs(Ptr(m_pRegisters->m_bl->m_pAddress))); break;
#if defined(ASMJIT_X64)
// 64-bit mode only
case SPL: a.mov(spl, byte_ptr_abs(Ptr(m_pRegisters->m_spl->m_pAddress))); break;
case BPL: a.mov(bpl, byte_ptr_abs(Ptr(m_pRegisters->m_bpl->m_pAddress))); break;
case SIL: a.mov(sil, byte_ptr_abs(Ptr(m_pRegisters->m_sil->m_pAddress))); break;
case DIL: a.mov(dil, byte_ptr_abs(Ptr(m_pRegisters->m_dil->m_pAddress))); break;
case R8B: a.mov(r8b, byte_ptr_abs(Ptr(m_pRegisters->m_r8b->m_pAddress))); break;
case R9B: a.mov(r9b, byte_ptr_abs(Ptr(m_pRegisters->m_r9b->m_pAddress))); break;
case R10B: a.mov(r10b, byte_ptr_abs(Ptr(m_pRegisters->m_r10b->m_pAddress))); break;
case R11B: a.mov(r11b, byte_ptr_abs(Ptr(m_pRegisters->m_r11b->m_pAddress))); break;
case R12B: a.mov(r12b, byte_ptr_abs(Ptr(m_pRegisters->m_r12b->m_pAddress))); break;
case R13B: a.mov(r13b, byte_ptr_abs(Ptr(m_pRegisters->m_r13b->m_pAddress))); break;
case R14B: a.mov(r14b, byte_ptr_abs(Ptr(m_pRegisters->m_r14b->m_pAddress))); break;
case R15B: a.mov(r15b, byte_ptr_abs(Ptr(m_pRegisters->m_r15b->m_pAddress))); break;
#endif // ASMJIT_X64
case AH: a.mov(ah, byte_ptr_abs(Ptr(m_pRegisters->m_ah->m_pAddress))); break;
case CH: a.mov(ch, byte_ptr_abs(Ptr(m_pRegisters->m_ch->m_pAddress))); break;
case DH: a.mov(dh, byte_ptr_abs(Ptr(m_pRegisters->m_dh->m_pAddress))); break;
case BH: a.mov(bh, byte_ptr_abs(Ptr(m_pRegisters->m_bh->m_pAddress))); break;
// ========================================================================
// >> 16-bit General purpose registers
// ========================================================================
case AX: a.mov(ax, word_ptr_abs(Ptr(m_pRegisters->m_ax->m_pAddress))); break;
case CX: a.mov(cx, word_ptr_abs(Ptr(m_pRegisters->m_cx->m_pAddress))); break;
case DX: a.mov(dx, word_ptr_abs(Ptr(m_pRegisters->m_dx->m_pAddress))); break;
case BX: a.mov(bx, word_ptr_abs(Ptr(m_pRegisters->m_bx->m_pAddress))); break;
case SP: a.mov(x86::sp, word_ptr_abs(Ptr(m_pRegisters->m_sp->m_pAddress))); break;
case BP: a.mov(bp, word_ptr_abs(Ptr(m_pRegisters->m_bp->m_pAddress))); break;
case SI: a.mov(si, word_ptr_abs(Ptr(m_pRegisters->m_si->m_pAddress))); break;
case DI: a.mov(di, word_ptr_abs(Ptr(m_pRegisters->m_di->m_pAddress))); break;
#if defined(ASMJIT_X64)
// 64-bit mode only
case R8W: a.mov(r8w, word_ptr_abs(Ptr(m_pRegisters->m_r8w->m_pAddress))); break;
case R9W: a.mov(r9w, word_ptr_abs(Ptr(m_pRegisters->m_r9w->m_pAddress))); break;
case R10W: a.mov(r10w, word_ptr_abs(Ptr(m_pRegisters->m_r10w->m_pAddress))); break;
case R11W: a.mov(r11w, word_ptr_abs(Ptr(m_pRegisters->m_r11w->m_pAddress))); break;
case R12W: a.mov(r12w, word_ptr_abs(Ptr(m_pRegisters->m_r12w->m_pAddress))); break;
case R13W: a.mov(r13w, word_ptr_abs(Ptr(m_pRegisters->m_r13w->m_pAddress))); break;
case R14W: a.mov(r14w, word_ptr_abs(Ptr(m_pRegisters->m_r14w->m_pAddress))); break;
case R15W: a.mov(r15w, word_ptr_abs(Ptr(m_pRegisters->m_r15w->m_pAddress))); break;
#endif // ASMJIT_X64
// ========================================================================
// >> 32-bit General purpose registers
// ========================================================================
case EAX: a.mov(eax, dword_ptr_abs(Ptr(m_pRegisters->m_eax->m_pAddress))); break;
case ECX: a.mov(ecx, dword_ptr_abs(Ptr(m_pRegisters->m_ecx->m_pAddress))); break;
case EDX: a.mov(edx, dword_ptr_abs(Ptr(m_pRegisters->m_edx->m_pAddress))); break;
case EBX: a.mov(ebx, dword_ptr_abs(Ptr(m_pRegisters->m_ebx->m_pAddress))); break;
case ESP: a.mov(esp, dword_ptr_abs(Ptr(m_pRegisters->m_esp->m_pAddress))); break;
case EBP: a.mov(ebp, dword_ptr_abs(Ptr(m_pRegisters->m_ebp->m_pAddress))); break;
case ESI: a.mov(esi, dword_ptr_abs(Ptr(m_pRegisters->m_esi->m_pAddress))); break;
case EDI: a.mov(edi, dword_ptr_abs(Ptr(m_pRegisters->m_edi->m_pAddress))); break;
#if defined(ASMJIT_X64)
// 64-bit mode only
case R8D: a.mov(r8d, dword_ptr_abs(Ptr(m_pRegisters->m_r8d->m_pAddress))); break;
case R9D: a.mov(r9d, dword_ptr_abs(Ptr(m_pRegisters->m_r9d->m_pAddress))); break;
case R10D: a.mov(r10d, dword_ptr_abs(Ptr(m_pRegisters->m_r10d->m_pAddress))); break;
case R11D: a.mov(r11d, dword_ptr_abs(Ptr(m_pRegisters->m_r11d->m_pAddress))); break;
case R12D: a.mov(r12d, dword_ptr_abs(Ptr(m_pRegisters->m_r12d->m_pAddress))); break;
case R13D: a.mov(r13d, dword_ptr_abs(Ptr(m_pRegisters->m_r13d->m_pAddress))); break;
case R14D: a.mov(r14d, dword_ptr_abs(Ptr(m_pRegisters->m_r14d->m_pAddress))); break;
case R15D: a.mov(r15d, dword_ptr_abs(Ptr(m_pRegisters->m_r15d->m_pAddress))); break;
#endif // ASMJIT_X64
// ========================================================================
// >> 64-bit General purpose registers
// ========================================================================
#if defined(ASMJIT_X64)
// 64-bit mode only
case RAX: a.mov(rax, qword_ptr_abs(Ptr(m_pRegisters->m_rax->m_pAddress))); break;
case RCX: a.mov(rcx, qword_ptr_abs(Ptr(m_pRegisters->m_rcx->m_pAddress))); break;
case RDX: a.mov(rdx, qword_ptr_abs(Ptr(m_pRegisters->m_rdx->m_pAddress))); break;
case RBX: a.mov(rbx, qword_ptr_abs(Ptr(m_pRegisters->m_rbx->m_pAddress))); break;
case RSP: a.mov(rsp, qword_ptr_abs(Ptr(m_pRegisters->m_rsp->m_pAddress))); break;
case RBP: a.mov(rbp, qword_ptr_abs(Ptr(m_pRegisters->m_rbp->m_pAddress))); break;
case RSI: a.mov(rsi, qword_ptr_abs(Ptr(m_pRegisters->m_rsi->m_pAddress))); break;
case RDI: a.mov(rdi, qword_ptr_abs(Ptr(m_pRegisters->m_rdi->m_pAddress))); break;
#endif // ASMJIT_X64
#if defined(ASMJIT_X64)
// 64-bit mode only
case R8: a.mov(r8, qword_ptr_abs(Ptr(m_pRegisters->m_r8->m_pAddress))); break;
case R9: a.mov(r9, qword_ptr_abs(Ptr(m_pRegisters->m_r9->m_pAddress))); break;
case R10: a.mov(r10, qword_ptr_abs(Ptr(m_pRegisters->m_r10->m_pAddress))); break;
case R11: a.mov(r11, qword_ptr_abs(Ptr(m_pRegisters->m_r11->m_pAddress))); break;
case R12: a.mov(r12, qword_ptr_abs(Ptr(m_pRegisters->m_r12->m_pAddress))); break;
case R13: a.mov(r13, qword_ptr_abs(Ptr(m_pRegisters->m_r13->m_pAddress))); break;
case R14: a.mov(r14, qword_ptr_abs(Ptr(m_pRegisters->m_r14->m_pAddress))); break;
case R15: a.mov(r15, qword_ptr_abs(Ptr(m_pRegisters->m_r15->m_pAddress))); break;
#endif // ASMJIT_X64
// ========================================================================
// >> 64-bit MM (MMX) registers
// ========================================================================
case MM0: a.movq(mm0, qword_ptr_abs(Ptr(m_pRegisters->m_mm0->m_pAddress))); break;
case MM1: a.movq(mm1, qword_ptr_abs(Ptr(m_pRegisters->m_mm1->m_pAddress))); break;
case MM2: a.movq(mm2, qword_ptr_abs(Ptr(m_pRegisters->m_mm2->m_pAddress))); break;
case MM3: a.movq(mm3, qword_ptr_abs(Ptr(m_pRegisters->m_mm3->m_pAddress))); break;
case MM4: a.movq(mm4, qword_ptr_abs(Ptr(m_pRegisters->m_mm4->m_pAddress))); break;
case MM5: a.movq(mm5, qword_ptr_abs(Ptr(m_pRegisters->m_mm5->m_pAddress))); break;
case MM6: a.movq(mm6, qword_ptr_abs(Ptr(m_pRegisters->m_mm6->m_pAddress))); break;
case MM7: a.movq(mm7, qword_ptr_abs(Ptr(m_pRegisters->m_mm7->m_pAddress))); break;
// ========================================================================
// >> 128-bit XMM registers
// ========================================================================
// TODO: Also provide movups?
case XMM0: a.movaps(xmm0, oword_ptr_abs(Ptr(m_pRegisters->m_xmm0->m_pAddress))); break;
case XMM1: a.movaps(xmm1, oword_ptr_abs(Ptr(m_pRegisters->m_xmm1->m_pAddress))); break;
case XMM2: a.movaps(xmm2, oword_ptr_abs(Ptr(m_pRegisters->m_xmm2->m_pAddress))); break;
case XMM3: a.movaps(xmm3, oword_ptr_abs(Ptr(m_pRegisters->m_xmm3->m_pAddress))); break;
case XMM4: a.movaps(xmm4, oword_ptr_abs(Ptr(m_pRegisters->m_xmm4->m_pAddress))); break;
case XMM5: a.movaps(xmm5, oword_ptr_abs(Ptr(m_pRegisters->m_xmm5->m_pAddress))); break;
case XMM6: a.movaps(xmm6, oword_ptr_abs(Ptr(m_pRegisters->m_xmm6->m_pAddress))); break;
case XMM7: a.movaps(xmm7, oword_ptr_abs(Ptr(m_pRegisters->m_xmm7->m_pAddress))); break;
#if defined(ASMJIT_X64)
// 64-bit mode only
case XMM8: a.movaps(xmm8, oword_ptr_abs(Ptr(m_pRegisters->m_xmm8->m_pAddress))); break;
case XMM9: a.movaps(xmm9, oword_ptr_abs(Ptr(m_pRegisters->m_xmm9->m_pAddress))); break;
case XMM10: a.movaps(xmm10, oword_ptr_abs(Ptr(m_pRegisters->m_xmm10->m_pAddress))); break;
case XMM11: a.movaps(xmm11, oword_ptr_abs(Ptr(m_pRegisters->m_xmm11->m_pAddress))); break;
case XMM12: a.movaps(xmm12, oword_ptr_abs(Ptr(m_pRegisters->m_xmm12->m_pAddress))); break;
case XMM13: a.movaps(xmm13, oword_ptr_abs(Ptr(m_pRegisters->m_xmm13->m_pAddress))); break;
case XMM14: a.movaps(xmm14, oword_ptr_abs(Ptr(m_pRegisters->m_xmm14->m_pAddress))); break;
case XMM15: a.movaps(xmm15, oword_ptr_abs(Ptr(m_pRegisters->m_xmm15->m_pAddress))); break;
#endif // ASMJIT_X64
// ========================================================================
// >> 16-bit Segment registers
// ========================================================================
case CS: a.mov(cs, word_ptr_abs(Ptr(m_pRegisters->m_cs->m_pAddress))); break;
case SS: a.mov(ss, word_ptr_abs(Ptr(m_pRegisters->m_ss->m_pAddress))); break;
case DS: a.mov(ds, word_ptr_abs(Ptr(m_pRegisters->m_ds->m_pAddress))); break;
case ES: a.mov(es, word_ptr_abs(Ptr(m_pRegisters->m_es->m_pAddress))); break;
case FS: a.mov(fs, word_ptr_abs(Ptr(m_pRegisters->m_fs->m_pAddress))); break;
case GS: a.mov(gs, word_ptr_abs(Ptr(m_pRegisters->m_gs->m_pAddress))); break;
// ========================================================================
// >> 80-bit FPU registers
// ========================================================================
case ST0: a.fld(dword_ptr_abs(Ptr(m_pRegisters->m_st0->m_pAddress))); break;
//case ST1: a.mov(st1, tword_ptr_abs(Ptr(m_pRegisters->m_st1->m_pAddress))); break;
//case ST2: a.mov(st2, tword_ptr_abs(Ptr(m_pRegisters->m_st2->m_pAddress))); break;
//case ST3: a.mov(st3, tword_ptr_abs(Ptr(m_pRegisters->m_st3->m_pAddress))); break;
//case ST4: a.mov(st4, tword_ptr_abs(Ptr(m_pRegisters->m_st4->m_pAddress))); break;
//case ST5: a.mov(st5, tword_ptr_abs(Ptr(m_pRegisters->m_st5->m_pAddress))); break;
//case ST6: a.mov(st6, tword_ptr_abs(Ptr(m_pRegisters->m_st6->m_pAddress))); break;
//case ST7: a.mov(st7, tword_ptr_abs(Ptr(m_pRegisters->m_st7->m_pAddress))); break;
default: puts("Unsupported register.");
}
}
}

187
DynamicHooks/hook.h Normal file
View File

@ -0,0 +1,187 @@
/**
* =============================================================================
* DynamicHooks
* Copyright (C) 2015 Robin Gohmert. All rights reserved.
* =============================================================================
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
* asm.h/cpp from devmaster.net (thanks cybermind) edited by pRED* to handle gcc
* -fPIC thunks correctly
*
* Idea and trampoline code taken from DynDetours (thanks your-name-here).
*/
#ifndef _HOOK_H
#define _HOOK_H
// ============================================================================
// >> INCLUDES
// ============================================================================
#include <list>
#include <map>
#include "registers.h"
#include "convention.h"
#include "AsmJit/asmjit.h"
// ============================================================================
// >> HookType_t
// ============================================================================
enum HookType_t
{
// Callback will be executed before the original function.
HOOKTYPE_PRE,
// Callback will be executed after the original function.
HOOKTYPE_POST
};
// ============================================================================
// >> TYPEDEFS
// ============================================================================
class CHook;
typedef bool (*HookHandlerFn)(HookType_t, CHook*);
#ifdef __linux__
#define __cdecl
#endif
// ============================================================================
// >> CLASSES
// ============================================================================
class CHook
{
private:
friend class CHookManager;
/*
Creates a new function hook.
@param <pFunc>:
The address of the function to hook
@param <pConvention>:
The calling convention of <pFunc>.
*/
CHook(void* pFunc, ICallingConvention* pConvention);
~CHook();
public:
/*
Adds a hook handler to the hook.
@param type The hook type.
@param pFunc The hook handler that should be added.
*/
void AddCallback(HookType_t type, HookHandlerFn* pFunc);
/*
Removes a hook handler from the hook.
@param type The hook type.
@param pFunc The hook handler that should be removed.
*/
void RemoveCallback(HookType_t type, HookHandlerFn* pFunc);
/*
Checks if a hook handler is already added.
@param type The hook type.
@param pFunc The hook handler that should be checked.
*/
bool IsCallbackRegistered(HookType_t type, HookHandlerFn* pFunc);
/*
Checks if there are any hook handlers added to this hook.
*/
bool AreCallbacksRegistered();
template<class T>
T GetArgument(int iIndex)
{
return *(T *) m_pCallingConvention->GetArgumentPtr(iIndex, m_pRegisters);
}
template<class T>
void SetArgument(int iIndex, T value)
{
void* pPtr = m_pCallingConvention->GetArgumentPtr(iIndex, m_pRegisters);
*(T *) pPtr = value;
m_pCallingConvention->ArgumentPtrChanged(iIndex, m_pRegisters, pPtr);
}
template<class T>
T GetReturnValue()
{
return *(T *) m_pCallingConvention->GetReturnPtr(m_pRegisters);
}
template<class T>
void SetReturnValue(T value)
{
void* pPtr = m_pCallingConvention->GetReturnPtr(m_pRegisters);
*(T *) pPtr = value;
m_pCallingConvention->ReturnPtrChanged(m_pRegisters, pPtr);
}
private:
void* CreateBridge();
void Write_ModifyReturnAddress(asmjit::X86Assembler& a);
void Write_CallHandler(asmjit::X86Assembler& a, HookType_t type);
void Write_SaveRegisters(asmjit::X86Assembler& a);
void Write_RestoreRegisters(asmjit::X86Assembler& a);
void* CreatePostCallback();
bool __cdecl HookHandler(HookType_t type);
void* __cdecl GetReturnAddress(void* pESP);
void __cdecl SetReturnAddress(void* pRetAddr, void* pESP);
public:
std::map<HookType_t, std::list<HookHandlerFn*> > m_hookHandler;
// Address of the original function
void* m_pFunc;
asmjit::JitRuntime m_Runtime;
ICallingConvention* m_pCallingConvention;
// Address of the bridge
void* m_pBridge;
// Address of the trampoline
void* m_pTrampoline;
// Register storage
CRegisters* m_pRegisters;
// New return address
void* m_pNewRetAddr;
std::map<void*, void*> m_RetAddr;
};
#endif // _HOOK_H
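A minimal usage sketch of the accessors above (not part of this commit): the target function, the argument indices, and the registration cast are illustrative assumptions, and whether a pre-hook's boolean return requests an override of the original call is not confirmed by this header alone.
// Hypothetical pre-hook handler for a __cdecl function such as int Add(int x, int y).
bool MyPreHandler(HookType_t eHookType, CHook* pHook)
{
	if (eHookType != HOOKTYPE_PRE)
		return false;

	int x = pHook->GetArgument<int>(0);   // read the first argument
	pHook->SetArgument<int>(1, x + 1);    // rewrite the second argument
	return false;                         // do not request special handling
}
// Registration passes the handler through a HookHandlerFn*; the exact cast
// convention is assumed here:
// pHook->AddCallback(HOOKTYPE_PRE, (HookHandlerFn *) (void *) &MyPreHandler);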

97
DynamicHooks/manager.cpp Normal file
View File

@ -0,0 +1,97 @@
/**
* =============================================================================
* DynamicHooks
* Copyright (C) 2015 Robin Gohmert. All rights reserved.
* =============================================================================
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
* asm.h/cpp from devmaster.net (thanks cybermind) edited by pRED* to handle gcc
* -fPIC thunks correctly
*
* Idea and trampoline code taken from DynDetours (thanks your-name-here).
*/
// ============================================================================
// >> INCLUDES
// ============================================================================
#include "manager.h"
// ============================================================================
// >> CHookManager
// ============================================================================
CHook* CHookManager::HookFunction(void* pFunc, ICallingConvention* pConvention)
{
if (!pFunc)
return NULL;
CHook* pHook = FindHook(pFunc);
if (pHook)
{
delete pConvention;
return pHook;
}
pHook = new CHook(pFunc, pConvention);
m_Hooks.push_back(pHook);
return pHook;
}
void CHookManager::UnhookFunction(void* pFunc)
{
CHook* pHook = FindHook(pFunc);
if (pHook)
{
m_Hooks.remove(pHook);
delete pHook;
}
}
CHook* CHookManager::FindHook(void* pFunc)
{
if (!pFunc)
return NULL;
for(std::list<CHook *>::iterator it=m_Hooks.begin(); it != m_Hooks.end(); it++)
{
CHook* pHook = *it;
if (pHook->m_pFunc == pFunc)
return pHook;
}
return NULL;
}
void CHookManager::UnhookAllFunctions()
{
for(std::list<CHook *>::iterator it=m_Hooks.begin(); it != m_Hooks.end(); it++)
delete *it;
m_Hooks.clear();
}
// ============================================================================
// >> GetHookManager
// ============================================================================
CHookManager* GetHookManager()
{
static CHookManager* s_pManager = new CHookManager;
return s_pManager;
}

83
DynamicHooks/manager.h Normal file
View File

@ -0,0 +1,83 @@
/**
* =============================================================================
* DynamicHooks
* Copyright (C) 2015 Robin Gohmert. All rights reserved.
* =============================================================================
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
* asm.h/cpp from devmaster.net (thanks cybermind) edited by pRED* to handle gcc
* -fPIC thunks correctly
*
* Idea and trampoline code taken from DynDetours (thanks your-name-here).
*/
#ifndef _MANAGER_H
#define _MANAGER_H
// ============================================================================
// >> INCLUDES
// ============================================================================
#include <list>
#include "hook.h"
#include "convention.h"
// ============================================================================
// >> CHookManager
// ============================================================================
class CHookManager
{
public:
/*
Hooks the given function and returns a new CHook instance. If the
function was already hooked, the existing CHook instance will be
returned.
*/
CHook* HookFunction(void* pFunc, ICallingConvention* pConvention);
/*
Removes all callbacks and restores the original function.
*/
void UnhookFunction(void* pFunc);
/*
Returns either NULL or the found CHook instance.
*/
CHook* FindHook(void* pFunc);
/*
Removes all callbacks and restores all functions.
*/
void UnhookAllFunctions();
public:
std::list<CHook *> m_Hooks;
};
// ============================================================================
// >> GetHookManager
// ============================================================================
/*
Returns a pointer to a static CHookManager object.
*/
CHookManager* GetHookManager();
#endif // _MANAGER_H
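A brief sketch of the manager API documented above, assuming #include "manager.h"; the target address and the ICallingConvention instance are placeholders, since the concrete convention classes live in convention.h and are not shown in this commit.
// Hypothetical: install and later remove a detour on pTarget.
void ExampleInstallHook(void* pTarget, ICallingConvention* pConvention)
{
	CHook* pHook = GetHookManager()->HookFunction(pTarget, pConvention);
	if (!pHook)
		return;

	// pHook->AddCallback(HOOKTYPE_PRE, ...);  // attach handlers as sketched for hook.h

	// Removes all callbacks and restores the original function.
	GetHookManager()->UnhookFunction(pTarget);
}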

381
DynamicHooks/registers.cpp Normal file
View File

@ -0,0 +1,381 @@
/**
* =============================================================================
* DynamicHooks
* Copyright (C) 2015 Robin Gohmert. All rights reserved.
* =============================================================================
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
* asm.h/cpp from devmaster.net (thanks cybermind) edited by pRED* to handle gcc
* -fPIC thunks correctly
*
* Idea and trampoline code taken from DynDetours (thanks your-name-here).
*/
#include "registers.h"
CRegisters::CRegisters(std::list<Register_t> registers)
{
// ========================================================================
// >> 8-bit General purpose registers
// ========================================================================
m_al = CreateRegister(registers, AL, 1);
m_cl = CreateRegister(registers, CL, 1);
m_dl = CreateRegister(registers, DL, 1);
m_bl = CreateRegister(registers, BL, 1);
// 64-bit mode only
/*
m_spl = CreateRegister(registers, SPL, 1);
m_bpl = CreateRegister(registers, BPL, 1);
m_sil = CreateRegister(registers, SIL, 1);
m_dil = CreateRegister(registers, DIL, 1);
m_r8b = CreateRegister(registers, R8B, 1);
m_r9b = CreateRegister(registers, R9B, 1);
m_r10b = CreateRegister(registers, R10B, 1);
m_r11b = CreateRegister(registers, R11B, 1);
m_r12b = CreateRegister(registers, R12B, 1);
m_r13b = CreateRegister(registers, R13B, 1);
m_r14b = CreateRegister(registers, R14B, 1);
m_r15b = CreateRegister(registers, R15B, 1);
*/
m_ah = CreateRegister(registers, AH, 1);
m_ch = CreateRegister(registers, CH, 1);
m_dh = CreateRegister(registers, DH, 1);
m_bh = CreateRegister(registers, BH, 1);
// ========================================================================
// >> 16-bit General purpose registers
// ========================================================================
m_ax = CreateRegister(registers, AX, 2);
m_cx = CreateRegister(registers, CX, 2);
m_dx = CreateRegister(registers, DX, 2);
m_bx = CreateRegister(registers, BX, 2);
m_sp = CreateRegister(registers, SP, 2);
m_bp = CreateRegister(registers, BP, 2);
m_si = CreateRegister(registers, SI, 2);
m_di = CreateRegister(registers, DI, 2);
// 64-bit mode only
/*
m_r8w = CreateRegister(registers, R8W, 2);
m_r9w = CreateRegister(registers, R9W, 2);
m_r10w = CreateRegister(registers, R10W, 2);
m_r11w = CreateRegister(registers, R11W, 2);
m_r12w = CreateRegister(registers, R12W, 2);
m_r13w = CreateRegister(registers, R13W, 2);
m_r14w = CreateRegister(registers, R14W, 2);
m_r15w = CreateRegister(registers, R15W, 2);
*/
// ========================================================================
// >> 32-bit General purpose registers
// ========================================================================
m_eax = CreateRegister(registers, EAX, 4);
m_ecx = CreateRegister(registers, ECX, 4);
m_edx = CreateRegister(registers, EDX, 4);
m_ebx = CreateRegister(registers, EBX, 4);
m_esp = CreateRegister(registers, ESP, 4);
m_ebp = CreateRegister(registers, EBP, 4);
m_esi = CreateRegister(registers, ESI, 4);
m_edi = CreateRegister(registers, EDI, 4);
// 64-bit mode only
/*
m_r8d = CreateRegister(registers, R8D, 4);
m_r9d = CreateRegister(registers, R9D, 4);
m_r10d = CreateRegister(registers, R10D, 4);
m_r11d = CreateRegister(registers, R11D, 4);
m_r12d = CreateRegister(registers, R12D, 4);
m_r13d = CreateRegister(registers, R13D, 4);
m_r14d = CreateRegister(registers, R14D, 4);
m_r15d = CreateRegister(registers, R15D, 4);
*/
// ========================================================================
// >> 64-bit General purpose registers
// ========================================================================
// 64-bit mode only
/*
m_rax = CreateRegister(registers, RAX, 8);
m_rcx = CreateRegister(registers, RCX, 8);
m_rdx = CreateRegister(registers, RDX, 8);
m_rbx = CreateRegister(registers, RBX, 8);
m_rsp = CreateRegister(registers, RSP, 8);
m_rbp = CreateRegister(registers, RBP, 8);
m_rsi = CreateRegister(registers, RSI, 8);
m_rdi = CreateRegister(registers, RDI, 8);
*/
// 64-bit mode only
/*
m_r8 = CreateRegister(registers, R8, 8);
m_r9 = CreateRegister(registers, R9, 8);
m_r10 = CreateRegister(registers, R10, 8);
m_r11 = CreateRegister(registers, R11, 8);
m_r12 = CreateRegister(registers, R12, 8);
m_r13 = CreateRegister(registers, R13, 8);
m_r14 = CreateRegister(registers, R14, 8);
m_r15 = CreateRegister(registers, R15, 8);
*/
// ========================================================================
// >> 64-bit MM (MMX) registers
// ========================================================================
m_mm0 = CreateRegister(registers, MM0, 8);
m_mm1 = CreateRegister(registers, MM1, 8);
m_mm2 = CreateRegister(registers, MM2, 8);
m_mm3 = CreateRegister(registers, MM3, 8);
m_mm4 = CreateRegister(registers, MM4, 8);
m_mm5 = CreateRegister(registers, MM5, 8);
m_mm6 = CreateRegister(registers, MM6, 8);
m_mm7 = CreateRegister(registers, MM7, 8);
// ========================================================================
// >> 128-bit XMM registers
// ========================================================================
m_xmm0 = CreateRegister(registers, XMM0, 16);
m_xmm1 = CreateRegister(registers, XMM1, 16);
m_xmm2 = CreateRegister(registers, XMM2, 16);
m_xmm3 = CreateRegister(registers, XMM3, 16);
m_xmm4 = CreateRegister(registers, XMM4, 16);
m_xmm5 = CreateRegister(registers, XMM5, 16);
m_xmm6 = CreateRegister(registers, XMM6, 16);
m_xmm7 = CreateRegister(registers, XMM7, 16);
// 64-bit mode only
/*
m_xmm8 = CreateRegister(registers, XMM8, 16);
m_xmm9 = CreateRegister(registers, XMM9, 16);
m_xmm10 = CreateRegister(registers, XMM10, 16);
m_xmm11 = CreateRegister(registers, XMM11, 16);
m_xmm12 = CreateRegister(registers, XMM12, 16);
m_xmm13 = CreateRegister(registers, XMM13, 16);
m_xmm14 = CreateRegister(registers, XMM14, 16);
m_xmm15 = CreateRegister(registers, XMM15, 16);
*/
// ========================================================================
// >> 16-bit Segment registers
// ========================================================================
m_cs = CreateRegister(registers, CS, 2);
m_ss = CreateRegister(registers, SS, 2);
m_ds = CreateRegister(registers, DS, 2);
m_es = CreateRegister(registers, ES, 2);
m_fs = CreateRegister(registers, FS, 2);
m_gs = CreateRegister(registers, GS, 2);
// ========================================================================
// >> 80-bit FPU registers
// ========================================================================
m_st0 = CreateRegister(registers, ST0, 10);
m_st1 = CreateRegister(registers, ST1, 10);
m_st2 = CreateRegister(registers, ST2, 10);
m_st3 = CreateRegister(registers, ST3, 10);
m_st4 = CreateRegister(registers, ST4, 10);
m_st5 = CreateRegister(registers, ST5, 10);
m_st6 = CreateRegister(registers, ST6, 10);
m_st7 = CreateRegister(registers, ST7, 10);
}
CRegisters::~CRegisters()
{
// ========================================================================
// >> 8-bit General purpose registers
// ========================================================================
DeleteRegister(m_al);
DeleteRegister(m_cl);
DeleteRegister(m_dl);
DeleteRegister(m_bl);
// 64-bit mode only
/*
DeleteRegister(m_spl);
DeleteRegister(m_bpl);
DeleteRegister(m_sil);
DeleteRegister(m_dil);
DeleteRegister(m_r8b);
DeleteRegister(m_r9b);
DeleteRegister(m_r10b);
DeleteRegister(m_r11b);
DeleteRegister(m_r12b);
DeleteRegister(m_r13b);
DeleteRegister(m_r14b);
DeleteRegister(m_r15b);
*/
DeleteRegister(m_ah);
DeleteRegister(m_ch);
DeleteRegister(m_dh);
DeleteRegister(m_bh);
// ========================================================================
// >> 16-bit General purpose registers
// ========================================================================
DeleteRegister(m_ax);
DeleteRegister(m_cx);
DeleteRegister(m_dx);
DeleteRegister(m_bx);
DeleteRegister(m_sp);
DeleteRegister(m_bp);
DeleteRegister(m_si);
DeleteRegister(m_di);
// 64-bit mode only
/*
DeleteRegister(m_r8w);
DeleteRegister(m_r9w);
DeleteRegister(m_r10w);
DeleteRegister(m_r11w);
DeleteRegister(m_r12w);
DeleteRegister(m_r13w);
DeleteRegister(m_r14w);
DeleteRegister(m_r15w);
*/
// ========================================================================
// >> 32-bit General purpose registers
// ========================================================================
DeleteRegister(m_eax);
DeleteRegister(m_ecx);
DeleteRegister(m_edx);
DeleteRegister(m_ebx);
DeleteRegister(m_esp);
DeleteRegister(m_ebp);
DeleteRegister(m_esi);
DeleteRegister(m_edi);
// 64-bit mode only
/*
DeleteRegister(m_r8d);
DeleteRegister(m_r9d);
DeleteRegister(m_r10d);
DeleteRegister(m_r11d);
DeleteRegister(m_r12d);
DeleteRegister(m_r13d);
DeleteRegister(m_r14d);
DeleteRegister(m_r15d);
*/
// ========================================================================
// >> 64-bit General purpose registers
// ========================================================================
// 64-bit mode only
/*
DeleteRegister(m_rax);
DeleteRegister(m_rcx);
DeleteRegister(m_rdx);
DeleteRegister(m_rbx);
DeleteRegister(m_rsp);
DeleteRegister(m_rbp);
DeleteRegister(m_rsi);
DeleteRegister(m_rdi);
*/
// 64-bit mode only
/*
DeleteRegister(m_r8);
DeleteRegister(m_r9);
DeleteRegister(m_r10);
DeleteRegister(m_r11);
DeleteRegister(m_r12);
DeleteRegister(m_r13);
DeleteRegister(m_r14);
DeleteRegister(m_r15);
*/
// ========================================================================
// >> 64-bit MM (MMX) registers
// ========================================================================
DeleteRegister(m_mm0);
DeleteRegister(m_mm1);
DeleteRegister(m_mm2);
DeleteRegister(m_mm3);
DeleteRegister(m_mm4);
DeleteRegister(m_mm5);
DeleteRegister(m_mm6);
DeleteRegister(m_mm7);
// ========================================================================
// >> 128-bit XMM registers
// ========================================================================
DeleteRegister(m_xmm0);
DeleteRegister(m_xmm1);
DeleteRegister(m_xmm2);
DeleteRegister(m_xmm3);
DeleteRegister(m_xmm4);
DeleteRegister(m_xmm5);
DeleteRegister(m_xmm6);
DeleteRegister(m_xmm7);
// 64-bit mode only
/*
DeleteRegister(m_xmm8);
DeleteRegister(m_xmm9);
DeleteRegister(m_xmm10);
DeleteRegister(m_xmm11);
DeleteRegister(m_xmm12);
DeleteRegister(m_xmm13);
DeleteRegister(m_xmm14);
DeleteRegister(m_xmm15);
*/
// ========================================================================
// >> 16-bit Segment registers
// ========================================================================
DeleteRegister(m_cs);
DeleteRegister(m_ss);
DeleteRegister(m_ds);
DeleteRegister(m_es);
DeleteRegister(m_fs);
DeleteRegister(m_gs);
// ========================================================================
// >> 80-bit FPU registers
// ========================================================================
DeleteRegister(m_st0);
DeleteRegister(m_st1);
DeleteRegister(m_st2);
DeleteRegister(m_st3);
DeleteRegister(m_st4);
DeleteRegister(m_st5);
DeleteRegister(m_st6);
DeleteRegister(m_st7);
}
CRegister* CRegisters::CreateRegister(std::list<Register_t>& registers, Register_t reg, int iSize)
{
for(std::list<Register_t>::iterator it=registers.begin(); it != registers.end(); it++)
{
if ((*it) == reg)
{
return new CRegister(iSize);
}
}
return NULL;
}
void CRegisters::DeleteRegister(CRegister* pRegister)
{
if (pRegister)
{
delete pRegister;
}
}

435
DynamicHooks/registers.h Normal file
View File

@ -0,0 +1,435 @@
/**
* =============================================================================
* DynamicHooks
* Copyright (C) 2015 Robin Gohmert. All rights reserved.
* =============================================================================
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
* asm.h/cpp from devmaster.net (thanks cybermind) edited by pRED* to handle gcc
* -fPIC thunks correctly
*
* Idea and trampoline code taken from DynDetours (thanks your-name-here).
*/
#ifndef _REGISTERS_H
#define _REGISTERS_H
// ============================================================================
// >> INCLUDES
// ============================================================================
#include <stdlib.h>
#include <list>
// ============================================================================
// >> Register_t
// ============================================================================
enum Register_t
{
// ========================================================================
// >> 8-bit General purpose registers
// ========================================================================
AL,
CL,
DL,
BL,
// 64-bit mode only
/*
SPL,
BPL,
SIL,
DIL,
R8B,
R9B,
R10B,
R11B,
R12B,
R13B,
R14B,
R15B,
*/
AH,
CH,
DH,
BH,
// ========================================================================
// >> 16-bit General purpose registers
// ========================================================================
AX,
CX,
DX,
BX,
SP,
BP,
SI,
DI,
// 64-bit mode only
/*
R8W,
R9W,
R10W,
R11W,
R12W,
R13W,
R14W,
R15W,
*/
// ========================================================================
// >> 32-bit General purpose registers
// ========================================================================
EAX,
ECX,
EDX,
EBX,
ESP,
EBP,
ESI,
EDI,
// 64-bit mode only
/*
R8D,
R9D,
R10D,
R11D,
R12D,
R13D,
R14D,
R15D,
*/
// ========================================================================
// >> 64-bit General purpose registers
// ========================================================================
// 64-bit mode only
/*
RAX,
RCX,
RDX,
RBX,
RSP,
RBP,
RSI,
RDI,
*/
// 64-bit mode only
/*
R8,
R9,
R10,
R11,
R12,
R13,
R14,
R15,
*/
// ========================================================================
// >> 64-bit MM (MMX) registers
// ========================================================================
MM0,
MM1,
MM2,
MM3,
MM4,
MM5,
MM6,
MM7,
// ========================================================================
// >> 128-bit XMM registers
// ========================================================================
XMM0,
XMM1,
XMM2,
XMM3,
XMM4,
XMM5,
XMM6,
XMM7,
// 64-bit mode only
/*
XMM8,
XMM9,
XMM10,
XMM11,
XMM12,
XMM13,
XMM14,
XMM15,
*/
// ========================================================================
// >> 16-bit Segment registers
// ========================================================================
CS,
SS,
DS,
ES,
FS,
GS,
// ========================================================================
// >> 80-bit FPU registers
// ========================================================================
ST0,
ST1,
ST2,
ST3,
ST4,
ST5,
ST6,
ST7,
};
// ============================================================================
// >> CRegister
// ============================================================================
class CRegister
{
public:
CRegister(int iSize)
{
m_iSize = iSize;
m_pAddress = malloc(iSize);
}
~CRegister()
{
free(m_pAddress);
}
template<class T>
T GetValue()
{
return *(T *) m_pAddress;
}
template<class T>
T GetPointerValue(int iOffset=0)
{
return *(T *) (GetValue<unsigned long>() + iOffset);
}
template<class T>
void SetValue(T value)
{
*(T *) m_pAddress = value;
}
template<class T>
void SetPointerValue(T value, int iOffset=0)
{
*(T *) (GetValue<unsigned long>() + iOffset) = value;
}
public:
int m_iSize;
void* m_pAddress;
};
// ============================================================================
// >> CRegisters
// ============================================================================
class CRegisters
{
public:
CRegisters(std::list<Register_t> registers);
~CRegisters();
private:
CRegister* CreateRegister(std::list<Register_t>& registers, Register_t reg, int iSize);
void DeleteRegister(CRegister* pRegister);
public:
// ========================================================================
// >> 8-bit General purpose registers
// ========================================================================
CRegister* m_al;
CRegister* m_cl;
CRegister* m_dl;
CRegister* m_bl;
// 64-bit mode only
/*
CRegister* m_spl;
CRegister* m_bpl;
CRegister* m_sil;
CRegister* m_dil;
CRegister* m_r8b;
CRegister* m_r9b;
CRegister* m_r10b;
CRegister* m_r11b;
CRegister* m_r12b;
CRegister* m_r13b;
CRegister* m_r14b;
CRegister* m_r15b;
*/
CRegister* m_ah;
CRegister* m_ch;
CRegister* m_dh;
CRegister* m_bh;
// ========================================================================
// >> 16-bit General purpose registers
// ========================================================================
CRegister* m_ax;
CRegister* m_cx;
CRegister* m_dx;
CRegister* m_bx;
CRegister* m_sp;
CRegister* m_bp;
CRegister* m_si;
CRegister* m_di;
// 64-bit mode only
/*
CRegister* m_r8w;
CRegister* m_r9w;
CRegister* m_r10w;
CRegister* m_r11w;
CRegister* m_r12w;
CRegister* m_r13w;
CRegister* m_r14w;
CRegister* m_r15w;
*/
// ========================================================================
// >> 32-bit General purpose registers
// ========================================================================
CRegister* m_eax;
CRegister* m_ecx;
CRegister* m_edx;
CRegister* m_ebx;
CRegister* m_esp;
CRegister* m_ebp;
CRegister* m_esi;
CRegister* m_edi;
// 64-bit mode only
/*
CRegister* m_r8d;
CRegister* m_r9d;
CRegister* m_r10d;
CRegister* m_r11d;
CRegister* m_r12d;
CRegister* m_r13d;
CRegister* m_r14d;
CRegister* m_r15d;
*/
// ========================================================================
// >> 64-bit General purpose registers
// ========================================================================
// 64-bit mode only
/*
CRegister* m_rax;
CRegister* m_rcx;
CRegister* m_rdx;
CRegister* m_rbx;
CRegister* m_rsp;
CRegister* m_rbp;
CRegister* m_rsi;
CRegister* m_rdi;
*/
// 64-bit mode only
/*
CRegister* m_r8;
CRegister* m_r9;
CRegister* m_r10;
CRegister* m_r11;
CRegister* m_r12;
CRegister* m_r13;
CRegister* m_r14;
CRegister* m_r15;
*/
// ========================================================================
// >> 64-bit MM (MMX) registers
// ========================================================================
CRegister* m_mm0;
CRegister* m_mm1;
CRegister* m_mm2;
CRegister* m_mm3;
CRegister* m_mm4;
CRegister* m_mm5;
CRegister* m_mm6;
CRegister* m_mm7;
// ========================================================================
// >> 128-bit XMM registers
// ========================================================================
CRegister* m_xmm0;
CRegister* m_xmm1;
CRegister* m_xmm2;
CRegister* m_xmm3;
CRegister* m_xmm4;
CRegister* m_xmm5;
CRegister* m_xmm6;
CRegister* m_xmm7;
// 64-bit mode only
/*
CRegister* m_xmm8;
CRegister* m_xmm9;
CRegister* m_xmm10;
CRegister* m_xmm11;
CRegister* m_xmm12;
CRegister* m_xmm13;
CRegister* m_xmm14;
CRegister* m_xmm15;
*/
// ========================================================================
// >> 16-bit Segment registers
// ========================================================================
CRegister* m_cs;
CRegister* m_ss;
CRegister* m_ds;
CRegister* m_es;
CRegister* m_fs;
CRegister* m_gs;
// ========================================================================
// >> 80-bit FPU registers
// ========================================================================
CRegister* m_st0;
CRegister* m_st1;
CRegister* m_st2;
CRegister* m_st3;
CRegister* m_st4;
CRegister* m_st5;
CRegister* m_st6;
CRegister* m_st7;
};
#endif // _REGISTERS_H
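A short illustrative sketch of reading and writing the storage slots declared above; which register members are non-NULL depends on the list passed by the calling convention, and the stack offset used here is a hypothetical example.
// Hypothetical: peek at the saved stack pointer and patch the saved eax slot.
void ExampleInspect(CRegisters* pRegs)
{
	if (!pRegs || !pRegs->m_esp || !pRegs->m_eax)
		return; // register was not requested by the calling convention

	int iStackValue = pRegs->m_esp->GetPointerValue<int>(4); // reads [esp+4], offset assumed
	pRegs->m_eax->SetValue<int>(iStackValue);                // overwrites the saved eax storage
}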

76
DynamicHooks/thirdparty/AsmJit/apibegin.h vendored Normal file
View File

@ -0,0 +1,76 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Dependencies]
#if !defined(_ASMJIT_BUILD_H)
#include "./build.h"
#endif // !_ASMJIT_BUILD_H
// [Guard]
#if !defined(ASMJIT_API_SCOPE)
# define ASMJIT_API_SCOPE
#else
# error "[asmjit] Api-Scope is already active, previous scope not closed by apiend.h?"
#endif // ASMJIT_API_SCOPE
// [NoExcept]
#if !ASMJIT_CC_HAS_NOEXCEPT && !defined(noexcept)
# define noexcept ASMJIT_NOEXCEPT
# define ASMJIT_UNDEF_NOEXCEPT
#endif // !ASMJIT_CC_HAS_NOEXCEPT && !noexcept
// [NullPtr]
#if !ASMJIT_CC_HAS_NULLPTR && !defined(nullptr)
# define nullptr NULL
# define ASMJIT_UNDEF_NULLPTR
#endif // !ASMJIT_CC_HAS_NULLPTR && !nullptr
// [Override]
#if !ASMJIT_CC_HAS_OVERRIDE && !defined(override)
# define override
# define ASMJIT_UNDEF_OVERRIDE
#endif // !ASMJIT_CC_HAS_OVERRIDE && !override
// [CLang]
#if ASMJIT_CC_CLANG
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wunnamed-type-template-args"
#endif // ASMJIT_CC_CLANG
// [GCC]
#if ASMJIT_CC_GCC
# pragma GCC diagnostic push
# pragma GCC diagnostic warning "-Winline"
#endif // ASMJIT_CC_GCC
// [MSC]
#if ASMJIT_CC_MSC
# pragma warning(push)
# pragma warning(disable: 4127) // conditional expression is constant
# pragma warning(disable: 4201) // nameless struct/union
# pragma warning(disable: 4244) // '+=' : conversion from 'int' to 'x', possible
// loss of data
# pragma warning(disable: 4251) // struct needs to have dll-interface to be used
// by clients of struct ...
# pragma warning(disable: 4275) // non dll-interface struct ... used as base for
// dll-interface struct
# pragma warning(disable: 4355) // this used in base member initializer list
# pragma warning(disable: 4480) // specifying underlying type for enum
# pragma warning(disable: 4800) // forcing value to bool 'true' or 'false'
// TODO: Check if these defines are needed and for which versions of MSC. They may
// no longer be necessary, since these functions are part of C99.
# if !defined(vsnprintf)
# define ASMJIT_UNDEF_VSNPRINTF
# define vsnprintf _vsnprintf
# endif // !vsnprintf
# if !defined(snprintf)
# define ASMJIT_UNDEF_SNPRINTF
# define snprintf _snprintf
# endif // !snprintf
#endif // ASMJIT_CC_MSC

53
DynamicHooks/thirdparty/AsmJit/apiend.h vendored Normal file
View File

@ -0,0 +1,53 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#if defined(ASMJIT_API_SCOPE)
# undef ASMJIT_API_SCOPE
#else
# error "[asmjit] Api-Scope not active, forgot to include apibegin.h?"
#endif // ASMJIT_API_SCOPE
// [NoExcept]
#if defined(ASMJIT_UNDEF_NOEXCEPT)
# undef noexcept
# undef ASMJIT_UNDEF_NOEXCEPT
#endif // ASMJIT_UNDEF_NOEXCEPT
// [NullPtr]
#if defined(ASMJIT_UNDEF_NULLPTR)
# undef nullptr
# undef ASMJIT_UNDEF_NULLPTR
#endif // ASMJIT_UNDEF_NULLPTR
// [Override]
#if defined(ASMJIT_UNDEF_OVERRIDE)
# undef override
# undef ASMJIT_UNDEF_OVERRIDE
#endif // ASMJIT_UNDEF_OVERRIDE
// [CLang]
#if ASMJIT_CC_CLANG
# pragma clang diagnostic pop
#endif // ASMJIT_CC_CLANG
// [GCC]
#if ASMJIT_CC_GCC
# pragma GCC diagnostic pop
#endif // ASMJIT_CC_GCC
// [MSC]
#if ASMJIT_CC_MSC
# pragma warning(pop)
# if defined(ASMJIT_UNDEF_VSNPRINTF)
# undef vsnprintf
# undef ASMJIT_UNDEF_VSNPRINTF
# endif // ASMJIT_UNDEF_VSNPRINTF
# if defined(ASMJIT_UNDEF_SNPRINTF)
# undef snprintf
# undef ASMJIT_UNDEF_SNPRINTF
# endif // ASMJIT_UNDEF_SNPRINTF
#endif // ASMJIT_CC_MSC

20
DynamicHooks/thirdparty/AsmJit/arm.h vendored Normal file
View File

@ -0,0 +1,20 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_ARM_H
#define _ASMJIT_ARM_H
// [Dependencies]
#include "./base.h"
#include "./arm/armassembler.h"
#include "./arm/armcompiler.h"
#include "./arm/arminst.h"
#include "./arm/armoperand.h"
// [Guard]
#endif // _ASMJIT_ARM_H

360
DynamicHooks/thirdparty/AsmJit/asmjit.h vendored Normal file
View File

@ -0,0 +1,360 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_ASMJIT_H
#define _ASMJIT_ASMJIT_H
// ============================================================================
// [asmjit_mainpage]
// ============================================================================
//! \mainpage
//!
//! AsmJit - Complete x86/x64 JIT and Remote Assembler for C++.
//!
//! A complete JIT and remote assembler for C++ language. It can generate native
//! code for x86 and x64 architectures and supports the whole x86/x64 instruction
//! set - from legacy MMX to the newest AVX2. It has a type-safe API that allows
//! C++ compiler to do semantic checks at compile-time even before the assembled
//! code is generated and executed.
//!
//! AsmJit is not a virtual machine (VM). It doesn't have functionality to
//! implement a VM out of the box; however, it can be used as a JIT backend
//! for your own VM. The usage of AsmJit is not limited at all; it's suitable
//! for multimedia, VM backends, remote code generation, and many other tasks.
//!
//! \section AsmJit_Main_Concepts Code Generation Concepts
//!
//! AsmJit has two completely different code generation concepts. The difference
//! is in how the code is generated. The first concept, also referred as a low
//! level concept, is called `Assembler` and it's the same as writing RAW
//! assembly by inserting instructions that use physical registers directly. In
//! this case AsmJit does only instruction encoding, verification and final code
//! relocation.
//!
//! The second concept, also referred as a high level concept, is called
//! `Compiler`. Compiler lets you use virtually unlimited number of registers
//! (it calls them variables), which significantly simplifies the code generation
//! process. Compiler allocates these virtual registers to physical registers
//! after the code generation is done. This requires some extra effort - Compiler
//! has to generate information for each node (instruction, function declaration,
//! function call, etc...) in the code, perform variable liveness analysis, and
//! translate the variable-based code into code that uses only physical registers.
//!
//! In addition, Compiler understands functions and their calling conventions.
//! It has been designed in a way that the code generated is always a function
//! having a prototype like a real programming language. By having a function
//! prototype, the Compiler is able to insert a prolog and epilog sequence into the
//! function being generated, and it is also able to generate the code necessary
//! to call other functions from your own code.
//!
//! There is no conclusion on which concept is better. `Assembler` brings full
//! control and the best performance, while `Compiler` makes the code-generation
//! more fun and more portable.
//!
//! \section AsmJit_Main_Sections Documentation Sections
//!
//! AsmJit documentation is structured into the following sections:
//! - \ref asmjit_base "Base" - Base API (architecture independent).
//! - \ref asmjit_x86 "X86/X64" - X86/X64 API.
//!
//! \section AsmJit_Main_HomePage AsmJit Homepage
//!
//! - https://github.com/kobalicek/asmjit
// ============================================================================
// [asmjit_base]
// ============================================================================
//! \defgroup asmjit_base AsmJit Base API (architecture independent)
//!
//! \brief Base API.
//!
//! Base API contains all classes that are platform and architecture independent.
//!
//! Code-Generation and Operands
//! ----------------------------
//!
//! List of the most useful code-generation and operand classes:
//! - \ref asmjit::Assembler - Low-level code-generation.
//! - \ref asmjit::ExternalTool - An external tool that can serialize to `Assembler`:
//! - \ref asmjit::Compiler - High-level code-generation.
//! - \ref asmjit::Runtime - Describes where the code is stored and how it's executed:
//! - \ref asmjit::HostRuntime - Runtime that runs on the host machine:
//! - \ref asmjit::JitRuntime - Runtime designed for JIT code generation and execution.
//! - \ref asmjit::StaticRuntime - Runtime for code that starts at a specific address.
//! - \ref asmjit::Stream - Stream is a list of \ref HLNode objects stored as a doubly
//! linked list:
//! - \ref asmjit::HLNode - Base node interface:
//! - \ref asmjit::HLInst - Instruction node.
//! - \ref asmjit::HLData - Data node.
//! - \ref asmjit::HLAlign - Align directive node.
//! - \ref asmjit::HLLabel - Label node.
//! - \ref asmjit::HLComment - Comment node.
//! - \ref asmjit::HLSentinel - Sentinel node.
//! - \ref asmjit::HLHint - Hint node.
//! - \ref asmjit::HLFunc - Function declaration node.
//! - \ref asmjit::HLRet - Function return node.
//! - \ref asmjit::HLCall - Function call node.
//! - \ref asmjit::HLCallArg - Function call argument node.
//! - \ref asmjit::Operand - Base class for all operands:
//! - \ref asmjit::Reg - Register operand (`Assembler` only).
//! - \ref asmjit::Var - Variable operand (`Compiler` only).
//! - \ref asmjit::Mem - Memory operand.
//! - \ref asmjit::Imm - Immediate operand.
//! - \ref asmjit::Label - Label operand.
//!
//! The following snippet shows how to set up basic JIT code generation:
//!
//! ~~~
//! using namespace asmjit;
//!
//! int main(int argc, char* argv[]) {
//! // JIT runtime is designed for JIT code generation and execution.
//! JitRuntime runtime;
//!
//! // An assembler instance needs to know about the runtime in order to function.
//! X86Assembler a(&runtime);
//!
//! // Compiler (if you intend to use it) requires an assembler instance.
//! X86Compiler c(&a);
//!
//! return 0;
//! }
//! ~~~
//!
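//! Building on that setup, here is a minimal end-to-end sketch (an illustration,
//! not part of the upstream documentation; it assumes the `asmjit_cast` helper
//! and `JitRuntime::release()` available in this snapshot) that assembles a tiny
//! function returning 1 and then calls it:
//!
//! ~~~
//! using namespace asmjit;
//!
//! int main(int argc, char* argv[]) {
//! JitRuntime runtime;
//! X86Assembler a(&runtime);
//!
//! // Generate the equivalent of: int fn(void) { return 1; }
//! a.mov(x86::eax, 1);
//! a.ret();
//!
//! // Relocate the code into executable memory owned by the runtime.
//! typedef int (*Func)(void);
//! Func fn = asmjit_cast<Func>(a.make());
//!
//! int result = fn();          // result == 1.
//! runtime.release((void*)fn); // Give the memory back to the runtime.
//! return result == 1 ? 0 : 1;
//! }
//! ~~~
//!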
//! Logging and Error Handling
//! --------------------------
//!
//! AsmJit contains a robust interface that can be used to log the generated code
//! and to handle possible errors. The base logging interface is provided by \ref
//! Logger, which is abstract and can be used as a base for your own logger.
//! AsmJit also implements some trivial logging concepts out of the box to
//! simplify development. \ref FileLogger logs into a C `FILE*` stream and
//! \ref StringLogger concatenates all log messages into a single string.
//!
//! The following snippet shows how to setup a basic logger and error handler:
//!
//! ~~~
//! using namespace asmjit;
//!
//! struct MyErrorHandler : public ErrorHandler {
//! virtual bool handleError(Error code, const char* message, void* origin) {
//! printf("Error 0x%0.8X: %s\n", code, message);
//!
//! // True - error handled and code generation can continue.
//! // False - error not handled, code generation should stop.
//! return false;
//! }
//! };
//!
//! int main(int argc, char* argv[]) {
//! JitRuntime runtime;
//! FileLogger logger(stderr);
//! MyErrorHandler eh;
//!
//! X86Assembler a(&runtime);
//! a.setLogger(&logger);
//! a.setErrorHandler(&eh);
//!
//! ...
//!
//! return 0;
//! }
//! ~~~
//!
//! AsmJit also contains an \ref ErrorHandler, which is an abstract class that
//! can be used to implement your own error handling. It can be associated with
//! \ref Assembler and used to report all errors. It's a very convenient way to
//! be aware of any error that happens during the code generation without making
//! the error handling complicated.
//!
//! List of the most useful logging and error handling classes:
//! - \ref asmjit::Logger - Abstract logging interface:
//! - \ref asmjit::FileLogger - A logger that logs to `FILE*`.
//! - \ref asmjit::StringLogger - A logger that concatenates to a single string.
//! - \ref asmjit::ErrorHandler - An easy way to handle \ref Assembler and
//! \ref Compiler errors.
//!
//! Zone Memory Allocator
//! ---------------------
//!
//! The zone memory allocator is an incremental memory allocator that can be used
//! to allocate short-lived data. It has much better performance characteristics
//! than general-purpose allocators, because essentially all it does is increment
//! a pointer and return its previous address. See \ref Zone for more details.
//!
//! The whole AsmJit library is based on zone memory allocation for performance
//! reasons. It has many other benefits, but performance was the main one
//! when designing the library.
//!
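//! For illustration, a small sketch using only the `Zone` calls that appear
//! elsewhere in this snapshot (`alloc`, `allocT` and `reset`):
//!
//! ~~~
//! using namespace asmjit;
//!
//! // Reserve blocks of roughly 4kB, minus the allocator's own overhead.
//! Zone zone(4096 - Zone::kZoneOverhead);
//!
//! void* raw = zone.alloc(64);             // 64 bytes, valid until reset().
//! uint32_t* id = zone.allocT<uint32_t>(); // Typed allocation helper.
//!
//! // Everything is released at once; individual frees are not possible.
//! zone.reset(false); // Pass true to also return the memory to the system.
//! ~~~
//!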
//! POD Containers
//! --------------
//!
//! POD containers are used by AsmJit to manage its own data structures. The
//! following classes can be used by AsmJit consumers:
//!
//! - \ref asmjit::BitArray - A fixed bit-array that is used internally.
//! - \ref asmjit::PodVector<T> - A simple array-like container for storing
//! POD data.
//! - \ref asmjit::PodList<T> - A singly-linked list.
//! - \ref asmjit::StringBuilder - A string builder that can append strings
//! and integers.
//!
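//! For example, a sketch using the `PodVector<T>` operations seen elsewhere in
//! this snapshot (`append`, `getLength`, `getData`):
//!
//! ~~~
//! using namespace asmjit;
//!
//! PodVector<int> values;
//! if (values.append(42) != kErrorOk) {
//! // Allocation failed; `append` reports failures through `Error`.
//! }
//!
//! size_t count = values.getLength(); // Number of stored elements.
//! int* data = values.getData();      // Pointer to contiguous storage.
//! ~~~
//!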
//! Utility Functions
//! -----------------
//!
//! Utility functions are implemented as static members of the \ref Utils class.
//! There are utilities for bit manipulation and bit counting, utilities to get
//! an integer minimum / maximum, and various other helpers needed to perform
//! alignment checks and binary casting from float to integer and vice versa.
//!
//! String utilities are implemented by the same \ref Utils class. They are
//! mostly used by AsmJit internals and not really important to end users.
//!
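//! For example (only helpers that appear elsewhere in this snapshot are used):
//!
//! ~~~
//! using namespace asmjit;
//!
//! bool fits = Utils::isInt8(-100);             // Encodable as a signed 8-bit value?
//! uint32_t lo = Utils::iMin<uint32_t>(16, 64); // Integer minimum; lo == 16.
//! ~~~
//!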
//! SIMD Utilities
//! --------------
//!
//! SIMD code generation often requires embedding constants after each function
//! or at the end of the whole code block. AsmJit contains the `Vec64`, `Vec128`
//! and `Vec256` classes that can be used to prepare data used when generating
//! SIMD code.
//!
//! X86/X64 code generators contain the member functions `dmm`, `dxmm`, and `dymm`,
//! which can be used to embed 64-bit, 128-bit and 256-bit data structures into
//! the machine code.
// ============================================================================
// [asmjit_x86]
// ============================================================================
//! \defgroup asmjit_x86 AsmJit X86/X64 API
//!
//! \brief X86/X64 API
//!
//! X86/X64 Code Generation
//! -----------------------
//!
//! X86/X64 code generation is realized through:
//! - \ref X86Assembler - low-level code generation.
//! - \ref X86Compiler - high-level code generation.
//!
//! X86/X64 Registers
//! -----------------
//!
//! There are static objects that represent the X86 and X64 registers. They can
//! be used directly (like `eax`, `mm`, `xmm`, ...) or created through
//! these functions:
//!
//! - `asmjit::x86::gpb_lo()` - Get an 8-bit low GPB register.
//! - `asmjit::x86::gpb_hi()` - Get an 8-bit high GPB register.
//! - `asmjit::x86::gpw()` - Get a 16-bit GPW register.
//! - `asmjit::x86::gpd()` - Get a 32-bit GPD register.
//! - `asmjit::x86::gpq()` - Get a 64-bit GPQ Gp register.
//! - `asmjit::x86::gpz()` - Get a 32-bit or 64-bit GPD/GPQ register.
//! - `asmjit::x86::fp()` - Get an 80-bit FPU register.
//! - `asmjit::x86::mm()` - Get a 64-bit MMX register.
//! - `asmjit::x86::xmm()` - Get a 128-bit XMM register.
//! - `asmjit::x86::ymm()` - Get a 256-bit YMM register.
//! - `asmjit::x86::zmm()` - Get a 512-bit ZMM register.
//!
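//! For example (the register type names below, such as `X86GpReg` and
//! `X86XmmReg`, are an assumption based on this snapshot rather than part of
//! the list above):
//!
//! ~~~
//! using namespace asmjit;
//!
//! X86GpReg counter = x86::gpd(1); // Same physical register as x86::ecx.
//! X86XmmReg accum = x86::xmm(0);  // Same physical register as x86::xmm0.
//! ~~~
//!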
//! X86/X64 Addressing
//! ------------------
//!
//! The X86 and X64 architectures provide several addressing modes, and most of
//! them are supported by the AsmJit library. Memory addresses are represented by
//! the `BaseMem` class. These functions are used to make operands that represent
//! memory addresses:
//!
//! - `asmjit::x86::ptr()` - Address size not specified.
//! - `asmjit::x86::byte_ptr()` - 1 byte.
//! - `asmjit::x86::word_ptr()` - 2 bytes (GPW size).
//! - `asmjit::x86::dword_ptr()` - 4 bytes (GPD size).
//! - `asmjit::x86::qword_ptr()` - 8 bytes (GPQ/MMX size).
//! - `asmjit::x86::tword_ptr()` - 10 bytes (FPU size).
//! - `asmjit::x86::dqword_ptr()` - 16 bytes (XMM size).
//! - `asmjit::x86::yword_ptr()` - 32 bytes (YMM size).
//! - `asmjit::x86::zword_ptr()` - 64 bytes (ZMM size).
//!
//! The most useful function for making a pointer is `asmjit::x86::ptr()`. It
//! creates a pointer to the target with an unspecified size. An unspecified size
//! works in all intrinsics that also use a register operand (the size is then
//! implied by the register operand or by the instruction itself). For example,
//! `asmjit::x86::ptr()` can't be used with the `Assembler::inc()` instruction; in
//! that case the size must be specified, which is also the reason to differentiate
//! between pointer sizes.
//!
//! X86 and X64 support simple address forms like `[base + displacement]` and
//! also complex address forms like `[base + index * scale + displacement]`.
//!
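//! For example, assuming an `X86Assembler` instance `a` as in the snippets above:
//!
//! ~~~
//! using namespace asmjit;
//!
//! // [esp + 8]; the size (4 bytes) is implied by the register operand.
//! a.mov(x86::eax, x86::ptr(x86::esp, 8));
//!
//! // No register operand here, so the memory size must be explicit.
//! a.inc(x86::dword_ptr(x86::esp, 8));
//! ~~~
//!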
//! X86/X64 Immediates
//! ------------------
//!
//! Immediate values are constants that are encoded directly after the instruction
//! opcode. Use the `asmjit::imm()` or `asmjit::imm_u()` functions to create a
//! signed or unsigned immediate value, respectively.
//!
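//! For example, again assuming an `X86Assembler` instance `a`:
//!
//! ~~~
//! using namespace asmjit;
//!
//! a.mov(x86::eax, imm(-1));      // Signed immediate.
//! a.add(x86::eax, imm_u(0x10u)); // Unsigned immediate.
//! ~~~
//!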
//! X86/X64 CPU Information
//! -----------------------
//!
//! The CPUID instruction can be used to get exhaustive information about
//! the host X86/X64 processor. AsmJit contains utilities that retrieve the most
//! important information related to the features supported by the CPU and the
//! host operating system, in addition to the host processor name and number of
//! cores. The `CpuInfo` class provides generic information about a host or target
//! processor and also contains X86/X64-specific information.
//!
//! By default AsmJit queries the CPU information after the library is loaded
//! and the queried information is reused by all instances of `JitRuntime`.
//! The global instance of `CpuInfo` can't be changed, because changing it would
//! affect the code generation of all `Runtime`s. If there is a need for specific
//! CPU information with modified features or a different processor vendor, it's
//! possible to create a new instance of `CpuInfo` and set up its members.
//!
//! CPU detection is important when generating JIT code that may or may not
//! use certain CPU features. For example, there used to be SSE/SSE2 detection
//! in the past, and today there is often AVX/AVX2 detection.
//!
//! The example below shows how to detect the SSE4.1 instruction set:
//!
//! ~~~
//! using namespace asmjit;
//!
//! const CpuInfo& cpuInfo = CpuInfo::getHost();
//!
//! if (cpuInfo.hasFeature(CpuInfo::kX86FeatureSSE4_1)) {
//! // Processor has SSE4.1.
//! }
//! else if (cpuInfo.hasFeature(CpuInfo::kX86FeatureSSE2)) {
//! // Processor doesn't have SSE4.1, but has SSE2.
//! }
//! else {
//! // Processor is archaic; it's a wonder AsmJit works here!
//! }
//! ~~~
// [Dependencies]
#include "./base.h"
// [ARM/ARM64]
#if defined(ASMJIT_BUILD_ARM32) || defined(ASMJIT_BUILD_ARM64)
#include "./arm.h"
#endif // ASMJIT_BUILD_ARM32 || ASMJIT_BUILD_ARM64
// [X86/X64]
#if defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64)
#include "./x86.h"
#endif // ASMJIT_BUILD_X86 || ASMJIT_BUILD_X64
// [Host]
#include "./host.h"
// [Guard]
#endif // _ASMJIT_ASMJIT_H

35
DynamicHooks/thirdparty/AsmJit/base.h vendored Normal file
View File

@ -0,0 +1,35 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_H
#define _ASMJIT_BASE_H
// [Dependencies]
#include "./build.h"
#include "./base/assembler.h"
#include "./base/constpool.h"
#include "./base/containers.h"
#include "./base/cpuinfo.h"
#include "./base/globals.h"
#include "./base/logger.h"
#include "./base/operand.h"
#include "./base/podvector.h"
#include "./base/runtime.h"
#include "./base/utils.h"
#include "./base/vectypes.h"
#include "./base/vmem.h"
#include "./base/zone.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
#include "./base/compiler.h"
#include "./base/compilerfunc.h"
#include "./base/hlstream.h"
#endif // !ASMJIT_DISABLE_COMPILER
// [Guard]
#endif // _ASMJIT_BASE_H

View File

@ -0,0 +1,503 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/assembler.h"
#include "../base/utils.h"
#include "../base/vmem.h"
#include <stdarg.h>
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::ErrorHandler]
// ============================================================================
ErrorHandler::ErrorHandler() noexcept {}
ErrorHandler::~ErrorHandler() noexcept {}
ErrorHandler* ErrorHandler::addRef() const noexcept {
return const_cast<ErrorHandler*>(this);
}
void ErrorHandler::release() noexcept {}
// ============================================================================
// [asmjit::ExternalTool]
// ============================================================================
ExternalTool::ExternalTool() noexcept
: _assembler(nullptr),
_exId(0),
_arch(kArchNone),
_regSize(0),
_finalized(false),
_reserved(0),
_lastError(kErrorNotInitialized) {}
ExternalTool::~ExternalTool() noexcept {}
Error ExternalTool::setLastError(Error error, const char* message) noexcept {
// Special case: reset the last error if the new error is `kErrorOk`.
if (error == kErrorOk) {
_lastError = kErrorOk;
return kErrorOk;
}
// Don't do anything if the code-generator doesn't have an associated assembler.
Assembler* assembler = getAssembler();
if (assembler == nullptr)
return error;
if (message == nullptr)
message = DebugUtils::errorAsString(error);
// Logging is skipped if the error is handled by `ErrorHandler`.
ErrorHandler* eh = assembler->getErrorHandler();
ASMJIT_TLOG("[ERROR (ExternalTool)] %s (0x%0.8u) %s\n", message,
static_cast<unsigned int>(error),
!eh ? "(Possibly unhandled?)" : "");
if (eh != nullptr && eh->handleError(error, message, this))
return error;
#if !defined(ASMJIT_DISABLE_LOGGER)
Logger* logger = assembler->getLogger();
if (logger != nullptr)
logger->logFormat(Logger::kStyleComment,
"*** ERROR (ExternalTool): %s (0x%0.8u).\n", message,
static_cast<unsigned int>(error));
#endif // !ASMJIT_DISABLE_LOGGER
// The handler->handleError() function may throw an exception or longjmp()
// to terminate the execution of `setLastError()`. This is the reason why
// we have delayed changing the `_lastError` member until now.
_lastError = error;
return error;
}
// ============================================================================
// [asmjit::Assembler - Construction / Destruction]
// ============================================================================
Assembler::Assembler(Runtime* runtime) noexcept
: _runtime(runtime),
_logger(nullptr),
_errorHandler(nullptr),
_arch(kArchNone),
_regSize(0),
_reserved(0),
_asmOptions(0),
_instOptions(0),
_lastError(runtime ? kErrorOk : kErrorNotInitialized),
_exIdGenerator(0),
_exCountAttached(0),
_zoneAllocator(8192 - Zone::kZoneOverhead),
_buffer(nullptr),
_end(nullptr),
_cursor(nullptr),
_trampolinesSize(0),
_comment(nullptr),
_unusedLinks(nullptr),
_labels(),
_relocations() {}
Assembler::~Assembler() noexcept {
reset(true);
if (_errorHandler != nullptr)
_errorHandler->release();
}
// ============================================================================
// [asmjit::Assembler - Reset]
// ============================================================================
void Assembler::reset(bool releaseMemory) noexcept {
_asmOptions = 0;
_instOptions = 0;
_lastError = kErrorOk;
_exIdGenerator = 0;
_exCountAttached = 0;
_zoneAllocator.reset(releaseMemory);
if (releaseMemory && _buffer != nullptr) {
ASMJIT_FREE(_buffer);
_buffer = nullptr;
_end = nullptr;
}
_cursor = _buffer;
_trampolinesSize = 0;
_comment = nullptr;
_unusedLinks = nullptr;
_sections.reset(releaseMemory);
_labels.reset(releaseMemory);
_relocations.reset(releaseMemory);
}
// ============================================================================
// [asmjit::Assembler - Logging & Error Handling]
// ============================================================================
Error Assembler::setLastError(Error error, const char* message) noexcept {
// Special case: reset the last error if the new error is `kErrorOk`.
if (error == kErrorOk) {
_lastError = kErrorOk;
return kErrorOk;
}
if (message == nullptr)
message = DebugUtils::errorAsString(error);
// Logging is skipped if the error is handled by `ErrorHandler`.
ErrorHandler* eh = _errorHandler;
ASMJIT_TLOG("[ERROR (Assembler)] %s (0x%0.8u) %s\n", message,
static_cast<unsigned int>(error),
!eh ? "(Possibly unhandled?)" : "");
if (eh != nullptr && eh->handleError(error, message, this))
return error;
#if !defined(ASMJIT_DISABLE_LOGGER)
Logger* logger = _logger;
if (logger != nullptr)
logger->logFormat(Logger::kStyleComment,
"*** ERROR (Assembler): %s (0x%0.8u).\n", message,
static_cast<unsigned int>(error));
#endif // !ASMJIT_DISABLE_LOGGER
// The handler->handleError() function may throw an exception or longjmp()
// to terminate the execution of `setLastError()`. This is the reason why
// we have delayed changing the `_lastError` member until now.
_lastError = error;
return error;
}
Error Assembler::setErrorHandler(ErrorHandler* handler) noexcept {
ErrorHandler* oldHandler = _errorHandler;
if (oldHandler != nullptr)
oldHandler->release();
if (handler != nullptr)
handler = handler->addRef();
_errorHandler = handler;
return kErrorOk;
}
// ============================================================================
// [asmjit::Assembler - Buffer]
// ============================================================================
Error Assembler::_grow(size_t n) noexcept {
size_t capacity = getCapacity();
size_t after = getOffset() + n;
// Overflow.
if (n > IntTraits<uintptr_t>::maxValue() - capacity)
return setLastError(kErrorNoHeapMemory);
// Grow is only called when more capacity is needed, so this shouldn't happen,
// but it's simple to catch here and it's not an error.
if (after <= capacity)
return kErrorOk;
if (capacity < kMemAllocOverhead)
capacity = kMemAllocOverhead;
else
capacity += kMemAllocOverhead;
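// Grow geometrically (doubling) up to kMemAllocGrowMax and linearly afterwards,
// until the usable capacity (minus the allocator overhead added above) covers
// the requested end offset.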
do {
size_t oldCapacity = capacity;
if (capacity < kMemAllocGrowMax)
capacity *= 2;
else
capacity += kMemAllocGrowMax;
// Overflow.
if (oldCapacity > capacity)
return setLastError(kErrorNoHeapMemory);
} while (capacity - kMemAllocOverhead < after);
capacity -= kMemAllocOverhead;
return _reserve(capacity);
}
Error Assembler::_reserve(size_t n) noexcept {
size_t capacity = getCapacity();
if (n <= capacity)
return kErrorOk;
uint8_t* newBuffer;
if (_buffer == nullptr)
newBuffer = static_cast<uint8_t*>(ASMJIT_ALLOC(n));
else
newBuffer = static_cast<uint8_t*>(ASMJIT_REALLOC(_buffer, n));
if (newBuffer == nullptr)
return setLastError(kErrorNoHeapMemory);
size_t offset = getOffset();
_buffer = newBuffer;
_end = _buffer + n;
_cursor = newBuffer + offset;
return kErrorOk;
}
// ============================================================================
// [asmjit::Assembler - Label]
// ============================================================================
uint32_t Assembler::_newLabelId() noexcept {
LabelData* data = _zoneAllocator.allocT<LabelData>();
data->offset = -1;
data->links = nullptr;
data->exId = 0;
data->exData = nullptr;
uint32_t id = OperandUtil::makeLabelId(static_cast<uint32_t>(_labels.getLength()));
Error error = _labels.append(data);
if (error != kErrorOk) {
setLastError(kErrorNoHeapMemory);
return kInvalidValue;
}
return id;
}
LabelLink* Assembler::_newLabelLink() noexcept {
LabelLink* link = _unusedLinks;
if (link) {
_unusedLinks = link->prev;
}
else {
link = _zoneAllocator.allocT<LabelLink>();
if (link == nullptr)
return nullptr;
}
link->prev = nullptr;
link->offset = 0;
link->displacement = 0;
link->relocId = -1;
return link;
}
Error Assembler::bind(const Label& label) noexcept {
// Get label data based on label id.
uint32_t index = label.getId();
LabelData* data = getLabelData(index);
// Label can be bound only once.
if (data->offset != -1)
return setLastError(kErrorLabelAlreadyBound);
#if !defined(ASMJIT_DISABLE_LOGGER)
if (_logger) {
StringBuilderTmp<256> sb;
sb.setFormat("L%u:", index);
size_t binSize = 0;
if (!_logger->hasOption(Logger::kOptionBinaryForm))
binSize = kInvalidIndex;
LogUtil::formatLine(sb, nullptr, binSize, 0, 0, _comment);
_logger->logString(Logger::kStyleLabel, sb.getData(), sb.getLength());
}
#endif // !ASMJIT_DISABLE_LOGGER
Error error = kErrorOk;
size_t pos = getOffset();
LabelLink* link = data->links;
LabelLink* prev = nullptr;
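// Walk all links (forward references recorded before this label was bound)
// and patch each of them, either through its RelocData entry or directly in
// the code buffer.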
while (link) {
intptr_t offset = link->offset;
if (link->relocId != -1) {
// Handle RelocData - We have to update RelocData information instead of
// patching the displacement in LabelData.
_relocations[link->relocId].data += static_cast<Ptr>(pos);
}
else {
// Not using relocId, this means that we are overwriting a real
// displacement in the binary stream.
int32_t patchedValue = static_cast<int32_t>(
static_cast<intptr_t>(pos) - offset + link->displacement);
// Size of the value we are going to patch. Only BYTE/DWORD is allowed.
uint32_t size = readU8At(offset);
ASMJIT_ASSERT(size == 1 || size == 4);
if (size == 4) {
writeI32At(offset, patchedValue);
}
else {
ASMJIT_ASSERT(size == 1);
if (Utils::isInt8(patchedValue))
writeU8At(offset, static_cast<uint32_t>(patchedValue) & 0xFF);
else
error = kErrorIllegalDisplacement;
}
}
prev = link->prev;
link = prev;
}
// Chain unused links.
link = data->links;
if (link) {
if (prev == nullptr)
prev = link;
prev->prev = _unusedLinks;
_unusedLinks = link;
}
// Set as bound (offset is zero or greater and no links).
data->offset = pos;
data->links = nullptr;
if (error != kErrorOk)
return setLastError(error);
_comment = nullptr;
return error;
}
// ============================================================================
// [asmjit::Assembler - Embed]
// ============================================================================
Error Assembler::embed(const void* data, uint32_t size) noexcept {
if (getRemainingSpace() < size) {
Error error = _grow(size);
if (error != kErrorOk)
return setLastError(error);
}
uint8_t* cursor = getCursor();
::memcpy(cursor, data, size);
setCursor(cursor + size);
#if !defined(ASMJIT_DISABLE_LOGGER)
if (_logger)
_logger->logBinary(Logger::kStyleData, data, size);
#endif // !ASMJIT_DISABLE_LOGGER
return kErrorOk;
}
// ============================================================================
// [asmjit::Assembler - Reloc]
// ============================================================================
size_t Assembler::relocCode(void* dst, Ptr baseAddress) const noexcept {
if (baseAddress == kNoBaseAddress)
baseAddress = static_cast<Ptr>((uintptr_t)dst);
return _relocCode(dst, baseAddress);
}
// ============================================================================
// [asmjit::Assembler - Make]
// ============================================================================
void* Assembler::make() noexcept {
// Do nothing on error condition or if no instruction has been emitted.
if (_lastError != kErrorOk || getCodeSize() == 0)
return nullptr;
void* p;
Error error = _runtime->add(&p, this);
if (error != kErrorOk)
setLastError(error);
return p;
}
// ============================================================================
// [asmjit::Assembler - Emit (Helpers)]
// ============================================================================
#define NA noOperand
Error Assembler::emit(uint32_t code) {
return _emit(code, NA, NA, NA, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0) {
return _emit(code, o0, NA, NA, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1) {
return _emit(code, o0, o1, NA, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) {
return _emit(code, o0, o1, o2, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) {
return _emit(code, o0, o1, o2, o3);
}
Error Assembler::emit(uint32_t code, int o0) {
return _emit(code, Imm(o0), NA, NA, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, int o1) {
return _emit(code, o0, Imm(o1), NA, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, int o2) {
return _emit(code, o0, o1, Imm(o2), NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, int o3) {
return _emit(code, o0, o1, o2, Imm(o3));
}
Error Assembler::emit(uint32_t code, int64_t o0) {
return _emit(code, Imm(o0), NA, NA, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, int64_t o1) {
return _emit(code, o0, Imm(o1), NA, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, int64_t o2) {
return _emit(code, o0, o1, Imm(o2), NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, int64_t o3) {
return _emit(code, o0, o1, o2, Imm(o3));
}
#undef NA
} // asmjit namespace
// [Api-End]
#include "../apiend.h"

File diff suppressed because it is too large

View File

@ -0,0 +1,630 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/assembler.h"
#include "../base/compiler.h"
#include "../base/compilercontext_p.h"
#include "../base/cpuinfo.h"
#include "../base/logger.h"
#include "../base/utils.h"
#include <stdarg.h>
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [Constants]
// ============================================================================
static const char noName[1] = { '\0' };
enum { kCompilerDefaultLookAhead = 64 };
// ============================================================================
// [asmjit::Compiler - Construction / Destruction]
// ============================================================================
Compiler::Compiler() noexcept
: _features(0),
_maxLookAhead(kCompilerDefaultLookAhead),
_instOptions(0),
_tokenGenerator(0),
_nodeFlowId(0),
_nodeFlags(0),
_targetVarMapping(nullptr),
_firstNode(nullptr),
_lastNode(nullptr),
_cursor(nullptr),
_func(nullptr),
_zoneAllocator(8192 - Zone::kZoneOverhead),
_varAllocator(4096 - Zone::kZoneOverhead),
_stringAllocator(4096 - Zone::kZoneOverhead),
_constAllocator(4096 - Zone::kZoneOverhead),
_localConstPool(&_constAllocator),
_globalConstPool(&_zoneAllocator) {}
Compiler::~Compiler() noexcept {}
// ============================================================================
// [asmjit::Compiler - Attach / Reset]
// ============================================================================
void Compiler::reset(bool releaseMemory) noexcept {
Assembler* assembler = getAssembler();
if (assembler != nullptr)
assembler->_detached(this);
_arch = kArchNone;
_regSize = 0;
_finalized = false;
_lastError = kErrorNotInitialized;
_features = 0;
_maxLookAhead = kCompilerDefaultLookAhead;
_instOptions = 0;
_tokenGenerator = 0;
_nodeFlowId = 0;
_nodeFlags = 0;
_firstNode = nullptr;
_lastNode = nullptr;
_cursor = nullptr;
_func = nullptr;
_localConstPool.reset();
_globalConstPool.reset();
_localConstPoolLabel.reset();
_globalConstPoolLabel.reset();
_zoneAllocator.reset(releaseMemory);
_varAllocator.reset(releaseMemory);
_stringAllocator.reset(releaseMemory);
_constAllocator.reset(releaseMemory);
_varList.reset(releaseMemory);
}
// ============================================================================
// [asmjit::Compiler - Node-Factory]
// ============================================================================
HLData* Compiler::newDataNode(const void* data, uint32_t size) noexcept {
if (size > HLData::kInlineBufferSize) {
void* clonedData = _stringAllocator.alloc(size);
if (clonedData == nullptr)
return nullptr;
if (data != nullptr)
::memcpy(clonedData, data, size);
data = clonedData;
}
return newNode<HLData>(const_cast<void*>(data), size);
}
HLAlign* Compiler::newAlignNode(uint32_t alignMode, uint32_t offset) noexcept {
return newNode<HLAlign>(alignMode, offset);
}
HLLabel* Compiler::newLabelNode() noexcept {
Assembler* assembler = getAssembler();
if (assembler == nullptr) return nullptr;
uint32_t id = assembler->_newLabelId();
LabelData* ld = assembler->getLabelData(id);
HLLabel* node = newNode<HLLabel>(id);
if (node == nullptr) return nullptr;
// These have to be zero now.
ASMJIT_ASSERT(ld->exId == 0);
ASMJIT_ASSERT(ld->exData == nullptr);
ld->exId = _exId;
ld->exData = node;
return node;
}
HLComment* Compiler::newCommentNode(const char* str) noexcept {
if (str != nullptr && str[0]) {
str = _stringAllocator.sdup(str);
if (str == nullptr)
return nullptr;
}
return newNode<HLComment>(str);
}
HLHint* Compiler::newHintNode(Var& var, uint32_t hint, uint32_t value) noexcept {
if (var.getId() == kInvalidValue)
return nullptr;
VarData* vd = getVd(var);
return newNode<HLHint>(vd, hint, value);
}
// ============================================================================
// [asmjit::Compiler - Code-Stream]
// ============================================================================
HLNode* Compiler::addFunc(HLFunc* func) noexcept {
ASMJIT_ASSERT(_func == nullptr);
_func = func;
addNode(func); // Add function node.
addNode(func->getEntryNode()); // Add function entry.
HLNode* cursor = getCursor();
addNode(func->getExitNode()); // Add function exit / epilog marker.
addNode(func->getEnd()); // Add function end.
setCursor(cursor);
return func;
}
HLNode* Compiler::addNode(HLNode* node) noexcept {
ASMJIT_ASSERT(node != nullptr);
ASMJIT_ASSERT(node->_prev == nullptr);
ASMJIT_ASSERT(node->_next == nullptr);
if (_cursor == nullptr) {
if (_firstNode == nullptr) {
_firstNode = node;
_lastNode = node;
}
else {
node->_next = _firstNode;
_firstNode->_prev = node;
_firstNode = node;
}
}
else {
HLNode* prev = _cursor;
HLNode* next = _cursor->_next;
node->_prev = prev;
node->_next = next;
prev->_next = node;
if (next)
next->_prev = node;
else
_lastNode = node;
}
_cursor = node;
return node;
}
HLNode* Compiler::addNodeBefore(HLNode* node, HLNode* ref) noexcept {
ASMJIT_ASSERT(node != nullptr);
ASMJIT_ASSERT(node->_prev == nullptr);
ASMJIT_ASSERT(node->_next == nullptr);
ASMJIT_ASSERT(ref != nullptr);
HLNode* prev = ref->_prev;
HLNode* next = ref;
node->_prev = prev;
node->_next = next;
next->_prev = node;
if (prev)
prev->_next = node;
else
_firstNode = node;
return node;
}
HLNode* Compiler::addNodeAfter(HLNode* node, HLNode* ref) noexcept {
ASMJIT_ASSERT(node != nullptr);
ASMJIT_ASSERT(node->_prev == nullptr);
ASMJIT_ASSERT(node->_next == nullptr);
ASMJIT_ASSERT(ref != nullptr);
HLNode* prev = ref;
HLNode* next = ref->_next;
node->_prev = prev;
node->_next = next;
prev->_next = node;
if (next)
next->_prev = node;
else
_lastNode = node;
return node;
}
static ASMJIT_INLINE void Compiler_nodeRemoved(Compiler* self, HLNode* node_) noexcept {
if (node_->isJmpOrJcc()) {
HLJump* node = static_cast<HLJump*>(node_);
HLLabel* label = node->getTarget();
if (label != nullptr) {
// Disconnect.
HLJump** pPrev = &label->_from;
for (;;) {
ASMJIT_ASSERT(*pPrev != nullptr);
HLJump* current = *pPrev;
if (current == nullptr)
break;
if (current == node) {
*pPrev = node->_jumpNext;
break;
}
pPrev = &current->_jumpNext;
}
label->subNumRefs();
}
}
}
HLNode* Compiler::removeNode(HLNode* node) noexcept {
HLNode* prev = node->_prev;
HLNode* next = node->_next;
if (_firstNode == node)
_firstNode = next;
else
prev->_next = next;
if (_lastNode == node)
_lastNode = prev;
else
next->_prev = prev;
node->_prev = nullptr;
node->_next = nullptr;
if (_cursor == node)
_cursor = prev;
Compiler_nodeRemoved(this, node);
return node;
}
void Compiler::removeNodes(HLNode* first, HLNode* last) noexcept {
if (first == last) {
removeNode(first);
return;
}
HLNode* prev = first->_prev;
HLNode* next = last->_next;
if (_firstNode == first)
_firstNode = next;
else
prev->_next = next;
if (_lastNode == last)
_lastNode = prev;
else
next->_prev = prev;
HLNode* node = first;
for (;;) {
HLNode* next = node->getNext();
ASMJIT_ASSERT(next != nullptr);
node->_prev = nullptr;
node->_next = nullptr;
if (_cursor == node)
_cursor = prev;
Compiler_nodeRemoved(this, node);
if (node == last)
break;
node = next;
}
}
HLNode* Compiler::setCursor(HLNode* node) noexcept {
HLNode* old = _cursor;
_cursor = node;
return old;
}
// ============================================================================
// [asmjit::Compiler - Align]
// ============================================================================
Error Compiler::align(uint32_t alignMode, uint32_t offset) noexcept {
HLAlign* node = newAlignNode(alignMode, offset);
if (node == nullptr)
return setLastError(kErrorNoHeapMemory);
addNode(node);
return kErrorOk;
}
// ============================================================================
// [asmjit::Compiler - Label]
// ============================================================================
HLLabel* Compiler::getHLLabel(uint32_t id) const noexcept {
Assembler* assembler = getAssembler();
if (assembler == nullptr) return nullptr;
LabelData* ld = assembler->getLabelData(id);
if (ld->exId == _exId)
return static_cast<HLLabel*>(ld->exData);
else
return nullptr;
}
bool Compiler::isLabelValid(uint32_t id) const noexcept {
Assembler* assembler = getAssembler();
if (assembler == nullptr) return false;
return static_cast<size_t>(id) < assembler->getLabelsCount();
}
uint32_t Compiler::_newLabelId() noexcept {
HLLabel* node = newLabelNode();
if (node == nullptr) {
setLastError(kErrorNoHeapMemory);
return kInvalidValue;
}
return node->getLabelId();
}
Error Compiler::bind(const Label& label) noexcept {
HLLabel* node = getHLLabel(label);
if (node == nullptr)
return setLastError(kErrorInvalidState);
addNode(node);
return kErrorOk;
}
// ============================================================================
// [asmjit::Compiler - Embed]
// ============================================================================
Error Compiler::embed(const void* data, uint32_t size) noexcept {
HLData* node = newDataNode(data, size);
if (node == nullptr)
return setLastError(kErrorNoHeapMemory);
addNode(node);
return kErrorOk;
}
Error Compiler::embedConstPool(const Label& label, const ConstPool& pool) noexcept {
if (label.getId() == kInvalidValue)
return kErrorInvalidState;
align(kAlignData, static_cast<uint32_t>(pool.getAlignment()));
bind(label);
HLData* embedNode = newDataNode(nullptr, static_cast<uint32_t>(pool.getSize()));
if (embedNode == nullptr)
return kErrorNoHeapMemory;
pool.fill(embedNode->getData());
addNode(embedNode);
return kErrorOk;
}
// ============================================================================
// [asmjit::Compiler - Comment]
// ============================================================================
Error Compiler::comment(const char* fmt, ...) noexcept {
char buf[256];
char* p = buf;
if (fmt) {
va_list ap;
va_start(ap, fmt);
p += vsnprintf(p, 254, fmt, ap);
va_end(ap);
}
p[0] = '\0';
HLComment* node = newCommentNode(buf);
if (node == nullptr)
return setLastError(kErrorNoHeapMemory);
addNode(node);
return kErrorOk;
}
// ============================================================================
// [asmjit::Compiler - Hint]
// ============================================================================
Error Compiler::_hint(Var& var, uint32_t hint, uint32_t value) noexcept {
if (var.getId() == kInvalidValue)
return kErrorOk;
HLHint* node = newHintNode(var, hint, value);
if (node == nullptr)
return setLastError(kErrorNoHeapMemory);
addNode(node);
return kErrorOk;
}
// ============================================================================
// [asmjit::Compiler - Vars]
// ============================================================================
VarData* Compiler::_newVd(const VarInfo& vi, const char* name) noexcept {
VarData* vd = reinterpret_cast<VarData*>(_varAllocator.alloc(sizeof(VarData)));
if (ASMJIT_UNLIKELY(vd == nullptr))
goto _NoMemory;
vd->_name = noName;
vd->_id = OperandUtil::makeVarId(static_cast<uint32_t>(_varList.getLength()));
vd->_localId = kInvalidValue;
#if !defined(ASMJIT_DISABLE_LOGGER)
if (name != nullptr && name[0] != '\0') {
vd->_name = _stringAllocator.sdup(name);
}
#endif // !ASMJIT_DISABLE_LOGGER
vd->_type = static_cast<uint8_t>(vi.getTypeId());
vd->_class = static_cast<uint8_t>(vi.getRegClass());
vd->_flags = 0;
vd->_priority = 10;
vd->_state = kVarStateNone;
vd->_regIndex = kInvalidReg;
vd->_isStack = false;
vd->_isMemArg = false;
vd->_isCalculated = false;
vd->_saveOnUnuse = false;
vd->_modified = false;
vd->_reserved0 = 0;
vd->_alignment = static_cast<uint8_t>(Utils::iMin<uint32_t>(vi.getSize(), 64));
vd->_size = vi.getSize();
vd->_homeMask = 0;
vd->_memOffset = 0;
vd->_memCell = nullptr;
vd->rReadCount = 0;
vd->rWriteCount = 0;
vd->mReadCount = 0;
vd->mWriteCount = 0;
vd->_va = nullptr;
if (ASMJIT_UNLIKELY(_varList.append(vd) != kErrorOk))
goto _NoMemory;
return vd;
_NoMemory:
setLastError(kErrorNoHeapMemory);
return nullptr;
}
Error Compiler::alloc(Var& var) noexcept {
if (var.getId() == kInvalidValue)
return kErrorOk;
return _hint(var, kVarHintAlloc, kInvalidValue);
}
Error Compiler::alloc(Var& var, uint32_t regIndex) noexcept {
if (var.getId() == kInvalidValue)
return kErrorOk;
return _hint(var, kVarHintAlloc, regIndex);
}
Error Compiler::alloc(Var& var, const Reg& reg) noexcept {
if (var.getId() == kInvalidValue)
return kErrorOk;
return _hint(var, kVarHintAlloc, reg.getRegIndex());
}
Error Compiler::save(Var& var) noexcept {
if (var.getId() == kInvalidValue)
return kErrorOk;
return _hint(var, kVarHintSave, kInvalidValue);
}
Error Compiler::spill(Var& var) noexcept {
if (var.getId() == kInvalidValue)
return kErrorOk;
return _hint(var, kVarHintSpill, kInvalidValue);
}
Error Compiler::unuse(Var& var) noexcept {
if (var.getId() == kInvalidValue)
return kErrorOk;
return _hint(var, kVarHintUnuse, kInvalidValue);
}
uint32_t Compiler::getPriority(Var& var) const noexcept {
if (var.getId() == kInvalidValue)
return kInvalidValue;
VarData* vd = getVdById(var.getId());
return vd->getPriority();
}
void Compiler::setPriority(Var& var, uint32_t priority) noexcept {
if (var.getId() == kInvalidValue)
return;
if (priority > 255)
priority = 255;
VarData* vd = getVdById(var.getId());
vd->_priority = static_cast<uint8_t>(priority);
}
bool Compiler::getSaveOnUnuse(Var& var) const noexcept {
if (var.getId() == kInvalidValue)
return false;
VarData* vd = getVdById(var.getId());
return static_cast<bool>(vd->_saveOnUnuse);
}
void Compiler::setSaveOnUnuse(Var& var, bool value) noexcept {
if (var.getId() == kInvalidValue)
return;
VarData* vd = getVdById(var.getId());
vd->_saveOnUnuse = value;
}
void Compiler::rename(Var& var, const char* fmt, ...) noexcept {
if (var.getId() == kInvalidValue)
return;
VarData* vd = getVdById(var.getId());
vd->_name = noName;
if (fmt != nullptr && fmt[0] != '\0') {
char buf[64];
va_list ap;
va_start(ap, fmt);
vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
buf[ASMJIT_ARRAY_SIZE(buf) - 1] = '\0';
vd->_name = _stringAllocator.sdup(buf);
va_end(ap);
}
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER

View File

@ -0,0 +1,576 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_COMPILER_H
#define _ASMJIT_BASE_COMPILER_H
#include "../build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/assembler.h"
#include "../base/compilerfunc.h"
#include "../base/constpool.h"
#include "../base/containers.h"
#include "../base/hlstream.h"
#include "../base/operand.h"
#include "../base/podvector.h"
#include "../base/utils.h"
#include "../base/zone.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [Forward Declarations]
// ============================================================================
struct VarAttr;
struct VarData;
struct VarMap;
struct VarState;
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::CompilerFeatures]
// ============================================================================
ASMJIT_ENUM(CompilerFeatures) {
//! Schedule instructions so they can be executed faster (`Compiler` only).
//!
//! Default `false` - has to be explicitly enabled as the scheduler needs
//! some time to run.
//!
//! X86/X64 Specific
//! ----------------
//!
//! If scheduling is enabled, AsmJit will try to reorder instructions to
//! minimize the dependency chain. The scheduler always runs after the registers
//! are allocated, so it doesn't change the number of register allocations/spills.
//!
//! This feature is highly experimental and untested.
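//!
//! A sketch of enabling it (using the `setFeature()` accessor declared later
//! in this header; `a` is an `X86Assembler` instance):
//!
//! ~~~
//! X86Compiler c(&a);
//! c.setFeature(kCompilerFeatureEnableScheduler, true);
//! ~~~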
kCompilerFeatureEnableScheduler = 0
};
// ============================================================================
// [asmjit::ConstScope]
// ============================================================================
//! Scope of the constant.
ASMJIT_ENUM(ConstScope) {
//! Local constant, always embedded right after the current function.
kConstScopeLocal = 0,
//! Global constant, embedded at the end of the currently compiled code.
kConstScopeGlobal = 1
};
// ============================================================================
// [asmjit::VarInfo]
// ============================================================================
struct VarInfo {
// ============================================================================
// [Flags]
// ============================================================================
//! \internal
//!
//! Variable flags.
ASMJIT_ENUM(Flags) {
//! Variable contains one or more single-precision floating point.
kFlagSP = 0x10,
//! Variable contains one or more double-precision floating point.
kFlagDP = 0x20,
//! Variable is a vector, contains packed data.
kFlagSIMD = 0x80
};
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get type id.
ASMJIT_INLINE uint32_t getTypeId() const noexcept { return _typeId; }
//! Get type name.
ASMJIT_INLINE const char* getTypeName() const noexcept { return _typeName; }
//! Get register size in bytes.
ASMJIT_INLINE uint32_t getSize() const noexcept { return _size; }
//! Get variable class, see \ref RegClass.
ASMJIT_INLINE uint32_t getRegClass() const noexcept { return _regClass; }
//! Get register type, see `X86RegType`.
ASMJIT_INLINE uint32_t getRegType() const noexcept { return _regType; }
//! Get type flags, see `VarFlag`.
ASMJIT_INLINE uint32_t getFlags() const noexcept { return _flags; }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Variable type id.
uint8_t _typeId;
//! Variable and register size (in bytes).
uint8_t _size;
//! Register class, see `RegClass`.
uint8_t _regClass;
//! Register type the variable is mapped to.
uint8_t _regType;
//! Variable info flags, see \ref Flags.
uint32_t _flags;
//! Variable type name.
char _typeName[8];
};
// ============================================================================
// [asmjit::Compiler]
// ============================================================================
//! Compiler interface.
//!
//! \sa Assembler.
class ASMJIT_VIRTAPI Compiler : public ExternalTool {
public:
ASMJIT_NO_COPY(Compiler)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new `Compiler` instance.
ASMJIT_API Compiler() noexcept;
//! Destroy the `Compiler` instance.
ASMJIT_API virtual ~Compiler() noexcept;
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
//! \override
ASMJIT_API virtual void reset(bool releaseMemory) noexcept;
// --------------------------------------------------------------------------
// [Compiler Features]
// --------------------------------------------------------------------------
//! Get code-generator features.
ASMJIT_INLINE uint32_t getFeatures() const noexcept {
return _features;
}
//! Set code-generator features.
ASMJIT_INLINE void setFeatures(uint32_t features) noexcept {
_features = features;
}
//! Get code-generator `feature`.
ASMJIT_INLINE bool hasFeature(uint32_t feature) const noexcept {
ASMJIT_ASSERT(feature < 32);
return (_features & (1 << feature)) != 0;
}
//! Set code-generator `feature` to `value`.
ASMJIT_INLINE void setFeature(uint32_t feature, bool value) noexcept {
ASMJIT_ASSERT(feature < 32);
// Clear the bit first so that passing `false` actually disables the feature.
uint32_t mask = static_cast<uint32_t>(value) << feature;
_features = (_features & ~(static_cast<uint32_t>(1) << feature)) | mask;
}
//! Get maximum look ahead.
ASMJIT_INLINE uint32_t getMaxLookAhead() const noexcept {
return _maxLookAhead;
}
//! Set maximum look ahead to `val`.
ASMJIT_INLINE void setMaxLookAhead(uint32_t val) noexcept {
_maxLookAhead = val;
}
// --------------------------------------------------------------------------
// [Token ID]
// --------------------------------------------------------------------------
//! \internal
//!
//! Reset the token-id generator.
ASMJIT_INLINE void _resetTokenGenerator() noexcept {
_tokenGenerator = 0;
}
//! \internal
//!
//! Generate a new unique token id.
ASMJIT_INLINE uint32_t _generateUniqueToken() noexcept {
return ++_tokenGenerator;
}
// --------------------------------------------------------------------------
// [Instruction Options]
// --------------------------------------------------------------------------
//! Get options of the next instruction.
ASMJIT_INLINE uint32_t getInstOptions() const noexcept {
return _instOptions;
}
//! Set options of the next instruction.
ASMJIT_INLINE void setInstOptions(uint32_t instOptions) noexcept {
_instOptions = instOptions;
}
//! Get options of the next instruction and reset them.
ASMJIT_INLINE uint32_t getInstOptionsAndReset() {
uint32_t instOptions = _instOptions;
_instOptions = 0;
return instOptions;
};
// --------------------------------------------------------------------------
// [Node-Factory]
// --------------------------------------------------------------------------
//! \internal
template<typename T>
ASMJIT_INLINE T* newNode() noexcept {
void* p = _zoneAllocator.alloc(sizeof(T));
return new(p) T(this);
}
//! \internal
template<typename T, typename P0>
ASMJIT_INLINE T* newNode(P0 p0) noexcept {
void* p = _zoneAllocator.alloc(sizeof(T));
return new(p) T(this, p0);
}
//! \internal
template<typename T, typename P0, typename P1>
ASMJIT_INLINE T* newNode(P0 p0, P1 p1) noexcept {
void* p = _zoneAllocator.alloc(sizeof(T));
return new(p) T(this, p0, p1);
}
//! \internal
template<typename T, typename P0, typename P1, typename P2>
ASMJIT_INLINE T* newNode(P0 p0, P1 p1, P2 p2) noexcept {
void* p = _zoneAllocator.alloc(sizeof(T));
return new(p) T(this, p0, p1, p2);
}
//! \internal
//!
//! Create a new `HLData` node.
ASMJIT_API HLData* newDataNode(const void* data, uint32_t size) noexcept;
//! \internal
//!
//! Create a new `HLAlign` node.
ASMJIT_API HLAlign* newAlignNode(uint32_t alignMode, uint32_t offset) noexcept;
//! \internal
//!
//! Create a new `HLLabel` node.
ASMJIT_API HLLabel* newLabelNode() noexcept;
//! \internal
//!
//! Create a new `HLComment`.
ASMJIT_API HLComment* newCommentNode(const char* str) noexcept;
//! \internal
//!
//! Create a new `HLHint`.
ASMJIT_API HLHint* newHintNode(Var& var, uint32_t hint, uint32_t value) noexcept;
// --------------------------------------------------------------------------
// [Code-Stream]
// --------------------------------------------------------------------------
//! Add a function `node` to the stream.
ASMJIT_API HLNode* addFunc(HLFunc* func) noexcept;
//! Add node `node` after current and set current to `node`.
ASMJIT_API HLNode* addNode(HLNode* node) noexcept;
//! Insert `node` before `ref`.
ASMJIT_API HLNode* addNodeBefore(HLNode* node, HLNode* ref) noexcept;
//! Insert `node` after `ref`.
ASMJIT_API HLNode* addNodeAfter(HLNode* node, HLNode* ref) noexcept;
//! Remove `node`.
ASMJIT_API HLNode* removeNode(HLNode* node) noexcept;
//! Remove multiple nodes.
ASMJIT_API void removeNodes(HLNode* first, HLNode* last) noexcept;
//! Get the first node.
ASMJIT_INLINE HLNode* getFirstNode() const noexcept { return _firstNode; }
//! Get the last node.
ASMJIT_INLINE HLNode* getLastNode() const noexcept { return _lastNode; }
//! Get current node.
//!
//! \note If this method returns `nullptr` it means that nothing has been
//! emitted yet.
ASMJIT_INLINE HLNode* getCursor() const noexcept { return _cursor; }
//! \internal
//!
//! Set the current node without returning the previous node.
ASMJIT_INLINE void _setCursor(HLNode* node) noexcept { _cursor = node; }
//! Set the current node to `node` and return the previous one.
ASMJIT_API HLNode* setCursor(HLNode* node) noexcept;
// --------------------------------------------------------------------------
// [Func]
// --------------------------------------------------------------------------
//! Get current function.
ASMJIT_INLINE HLFunc* getFunc() const noexcept { return _func; }
// --------------------------------------------------------------------------
// [Align]
// --------------------------------------------------------------------------
//! Align target buffer to the `offset` specified.
//!
//! The sequence that is used to fill the gap between the aligned location
//! and the current position depends on `alignMode`, see \ref AlignMode.
ASMJIT_API Error align(uint32_t alignMode, uint32_t offset) noexcept;
// --------------------------------------------------------------------------
// [Label]
// --------------------------------------------------------------------------
//! Get `HLLabel` by `id`.
//!
//! NOTE: The label has to be valid, see `isLabelValid()`.
ASMJIT_API HLLabel* getHLLabel(uint32_t id) const noexcept;
//! Get `HLLabel` by `label`.
//!
//! NOTE: The label has to be valid, see `isLabelValid()`.
ASMJIT_INLINE HLLabel* getHLLabel(const Label& label) noexcept {
return getHLLabel(label.getId());
}
//! Get whether the label `id` is valid.
ASMJIT_API bool isLabelValid(uint32_t id) const noexcept;
//! Get whether the `label` is valid.
ASMJIT_INLINE bool isLabelValid(const Label& label) const noexcept {
return isLabelValid(label.getId());
}
//! \internal
//!
//! Create a new label and return its ID.
ASMJIT_API uint32_t _newLabelId() noexcept;
//! Create and return a new `Label`.
ASMJIT_INLINE Label newLabel() noexcept { return Label(_newLabelId()); }
//! Bind label to the current offset.
//!
//! NOTE: Label can be bound only once!
ASMJIT_API Error bind(const Label& label) noexcept;
// --------------------------------------------------------------------------
// [Embed]
// --------------------------------------------------------------------------
//! Embed data.
ASMJIT_API Error embed(const void* data, uint32_t size) noexcept;
//! Embed a constant pool data, adding the following in order:
//! 1. Data alignment.
//! 2. Label.
//! 3. Constant pool data.
ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) noexcept;
// --------------------------------------------------------------------------
// [Comment]
// --------------------------------------------------------------------------
//! Emit a single comment line.
ASMJIT_API Error comment(const char* fmt, ...) noexcept;
// --------------------------------------------------------------------------
// [Hint]
// --------------------------------------------------------------------------
//! Emit a new hint (purely informational node).
ASMJIT_API Error _hint(Var& var, uint32_t hint, uint32_t value) noexcept;
// --------------------------------------------------------------------------
// [Vars]
// --------------------------------------------------------------------------
//! Get whether variable `var` is created.
ASMJIT_INLINE bool isVarValid(const Var& var) const noexcept {
return static_cast<size_t>(var.getId() & Operand::kIdIndexMask) < _varList.getLength();
}
//! \internal
//!
//! Get `VarData` by `var`.
ASMJIT_INLINE VarData* getVd(const Var& var) const noexcept {
return getVdById(var.getId());
}
//! \internal
//!
//! Get `VarData` by `id`.
ASMJIT_INLINE VarData* getVdById(uint32_t id) const noexcept {
ASMJIT_ASSERT(id != kInvalidValue);
ASMJIT_ASSERT(static_cast<size_t>(id & Operand::kIdIndexMask) < _varList.getLength());
return _varList[id & Operand::kIdIndexMask];
}
//! \internal
//!
//! Get an array of 'VarData*'.
ASMJIT_INLINE VarData** _getVdArray() const noexcept {
return const_cast<VarData**>(_varList.getData());
}
//! \internal
//!
//! Create a new `VarData`.
ASMJIT_API VarData* _newVd(const VarInfo& vi, const char* name) noexcept;
//! Alloc variable `var`.
ASMJIT_API Error alloc(Var& var) noexcept;
//! Alloc variable `var` using `regIndex` as a register index.
ASMJIT_API Error alloc(Var& var, uint32_t regIndex) noexcept;
//! Alloc variable `var` using `reg` as a register operand.
ASMJIT_API Error alloc(Var& var, const Reg& reg) noexcept;
//! Spill variable `var`.
ASMJIT_API Error spill(Var& var) noexcept;
//! Save variable `var` if the status is `modified` at this point.
ASMJIT_API Error save(Var& var) noexcept;
//! Unuse variable `var`.
ASMJIT_API Error unuse(Var& var) noexcept;
//! Get priority of variable `var`.
ASMJIT_API uint32_t getPriority(Var& var) const noexcept;
//! Set priority of variable `var` to `priority`.
ASMJIT_API void setPriority(Var& var, uint32_t priority) noexcept;
//! Get save-on-unuse `var` property.
ASMJIT_API bool getSaveOnUnuse(Var& var) const noexcept;
//! Set save-on-unuse `var` property to `value`.
ASMJIT_API void setSaveOnUnuse(Var& var, bool value) noexcept;
//! Rename variable `var` to `name`.
//!
//! NOTE: Only the new name will appear in the logger.
ASMJIT_API void rename(Var& var, const char* fmt, ...) noexcept;
// --------------------------------------------------------------------------
// [Stack]
// --------------------------------------------------------------------------
//! \internal
//!
//! Create a new memory chunk allocated on the current function's stack.
virtual Error _newStack(BaseMem* mem, uint32_t size, uint32_t alignment, const char* name) noexcept = 0;
// --------------------------------------------------------------------------
// [Const]
// --------------------------------------------------------------------------
//! \internal
//!
//! Put data to a constant-pool and get a memory reference to it.
virtual Error _newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) noexcept = 0;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Code-Generation features, used by \ref hasFeature() and \ref setFeature().
uint32_t _features;
//! Maximum count of nodes to look ahead when allocating/spilling
//! registers.
uint32_t _maxLookAhead;
//! Options affecting the next instruction.
uint32_t _instOptions;
//! Processing token generator.
//!
//! Used to get a unique token that is then used to process `HLNode`s. See
//! `Compiler::_generateUniqueToken()` for more details.
uint32_t _tokenGenerator;
//! Flow id added to each node created (used only by `Context`).
uint32_t _nodeFlowId;
//! Flags added to each node created (used only by `Context`).
uint32_t _nodeFlags;
//! Variable mapping (translates incoming VarType into target).
const uint8_t* _targetVarMapping;
//! First node.
HLNode* _firstNode;
//! Last node.
HLNode* _lastNode;
//! Current node.
HLNode* _cursor;
//! Current function.
HLFunc* _func;
//! General purpose zone allocator.
Zone _zoneAllocator;
//! Variable zone.
Zone _varAllocator;
//! String/data zone.
Zone _stringAllocator;
//! Local constant pool zone.
Zone _constAllocator;
//! VarData list.
PodVector<VarData*> _varList;
//! Local constant pool, flushed at the end of each function.
ConstPool _localConstPool;
//! Global constant pool, flushed at the end of the compilation.
ConstPool _globalConstPool;
//! Label to start of the local constant pool.
Label _localConstPoolLabel;
//! Label to start of the global constant pool.
Label _globalConstPoolLabel;
};
//! \}
// ============================================================================
// [Defined-Later]
// ============================================================================
ASMJIT_INLINE HLNode::HLNode(Compiler* compiler, uint32_t type) noexcept {
_prev = nullptr;
_next = nullptr;
_type = static_cast<uint8_t>(type);
_opCount = 0;
_flags = static_cast<uint16_t>(compiler->_nodeFlags);
_flowId = compiler->_nodeFlowId;
_tokenId = 0;
_comment = nullptr;
_map = nullptr;
_liveness = nullptr;
_state = nullptr;
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER
#endif // _ASMJIT_BASE_COMPILER_H

View File

@@ -0,0 +1,653 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/compilercontext_p.h"
#include "../base/utils.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::Context - Construction / Destruction]
// ============================================================================
Context::Context(Compiler* compiler) :
_compiler(compiler),
_zoneAllocator(8192 - Zone::kZoneOverhead),
_traceNode(nullptr),
_varMapToVaListOffset(0) {
Context::reset();
}
Context::~Context() {}
// ============================================================================
// [asmjit::Context - Reset]
// ============================================================================
void Context::reset(bool releaseMemory) {
_zoneAllocator.reset(releaseMemory);
_func = nullptr;
_start = nullptr;
_end = nullptr;
_extraBlock = nullptr;
_stop = nullptr;
_unreachableList.reset();
_returningList.reset();
_jccList.reset();
_contextVd.reset(releaseMemory);
_memVarCells = nullptr;
_memStackCells = nullptr;
_mem1ByteVarsUsed = 0;
_mem2ByteVarsUsed = 0;
_mem4ByteVarsUsed = 0;
_mem8ByteVarsUsed = 0;
_mem16ByteVarsUsed = 0;
_mem32ByteVarsUsed = 0;
_mem64ByteVarsUsed = 0;
_memStackCellsUsed = 0;
_memMaxAlign = 0;
_memVarTotal = 0;
_memStackTotal = 0;
_memAllTotal = 0;
_annotationLength = 12;
_state = nullptr;
}
// ============================================================================
// [asmjit::Context - Mem]
// ============================================================================
static ASMJIT_INLINE uint32_t BaseContext_getDefaultAlignment(uint32_t size) {
if (size > 32)
return 64;
else if (size > 16)
return 32;
else if (size > 8)
return 16;
else if (size > 4)
return 8;
else if (size > 2)
return 4;
else if (size > 1)
return 2;
else
return 1;
}
VarCell* Context::_newVarCell(VarData* vd) {
ASMJIT_ASSERT(vd->_memCell == nullptr);
VarCell* cell;
uint32_t size = vd->getSize();
if (vd->isStack()) {
cell = _newStackCell(size, vd->getAlignment());
if (cell == nullptr)
return nullptr;
}
else {
cell = static_cast<VarCell*>(_zoneAllocator.alloc(sizeof(VarCell)));
if (cell == nullptr)
goto _NoMemory;
cell->_next = _memVarCells;
_memVarCells = cell;
cell->_offset = 0;
cell->_size = size;
cell->_alignment = size;
_memMaxAlign = Utils::iMax<uint32_t>(_memMaxAlign, size);
_memVarTotal += size;
switch (size) {
case 1: _mem1ByteVarsUsed++ ; break;
case 2: _mem2ByteVarsUsed++ ; break;
case 4: _mem4ByteVarsUsed++ ; break;
case 8: _mem8ByteVarsUsed++ ; break;
case 16: _mem16ByteVarsUsed++; break;
case 32: _mem32ByteVarsUsed++; break;
case 64: _mem64ByteVarsUsed++; break;
default:
ASMJIT_NOT_REACHED();
}
}
vd->_memCell = cell;
return cell;
_NoMemory:
_compiler->setLastError(kErrorNoHeapMemory);
return nullptr;
}
VarCell* Context::_newStackCell(uint32_t size, uint32_t alignment) {
VarCell* cell = static_cast<VarCell*>(_zoneAllocator.alloc(sizeof(VarCell)));
if (cell == nullptr)
goto _NoMemory;
if (alignment == 0)
alignment = BaseContext_getDefaultAlignment(size);
if (alignment > 64)
alignment = 64;
ASMJIT_ASSERT(Utils::isPowerOf2(alignment));
size = Utils::alignTo<uint32_t>(size, alignment);
// Insert it sorted according to the alignment and size.
{
VarCell** pPrev = &_memStackCells;
VarCell* cur = *pPrev;
while (cur != nullptr) {
if ((cur->getAlignment() > alignment) ||
(cur->getAlignment() == alignment && cur->getSize() > size)) {
pPrev = &cur->_next;
cur = *pPrev;
continue;
}
break;
}
cell->_next = cur;
cell->_offset = 0;
cell->_size = size;
cell->_alignment = alignment;
*pPrev = cell;
_memStackCellsUsed++;
_memMaxAlign = Utils::iMax<uint32_t>(_memMaxAlign, alignment);
_memStackTotal += size;
}
return cell;
_NoMemory:
_compiler->setLastError(kErrorNoHeapMemory);
return nullptr;
}
Error Context::resolveCellOffsets() {
VarCell* varCell = _memVarCells;
VarCell* stackCell = _memStackCells;
uint32_t stackAlignment = 0;
if (stackCell != nullptr)
stackAlignment = stackCell->getAlignment();
uint32_t pos64 = 0;
uint32_t pos32 = pos64 + _mem64ByteVarsUsed * 64;
uint32_t pos16 = pos32 + _mem32ByteVarsUsed * 32;
uint32_t pos8 = pos16 + _mem16ByteVarsUsed * 16;
uint32_t pos4 = pos8 + _mem8ByteVarsUsed * 8 ;
uint32_t pos2 = pos4 + _mem4ByteVarsUsed * 4 ;
uint32_t pos1 = pos2 + _mem2ByteVarsUsed * 2 ;
uint32_t stackPos = pos1 + _mem1ByteVarsUsed;
uint32_t gapAlignment = stackAlignment;
uint32_t gapSize = 0;
// TODO: Not used!
if (gapAlignment)
Utils::alignDiff(stackPos, gapAlignment);
stackPos += gapSize;
uint32_t gapPos = stackPos;
uint32_t allTotal = stackPos;
// Vars - Allocated according to alignment/width.
while (varCell != nullptr) {
uint32_t size = varCell->getSize();
uint32_t offset = 0;
switch (size) {
case 1: offset = pos1 ; pos1 += 1 ; break;
case 2: offset = pos2 ; pos2 += 2 ; break;
case 4: offset = pos4 ; pos4 += 4 ; break;
case 8: offset = pos8 ; pos8 += 8 ; break;
case 16: offset = pos16; pos16 += 16; break;
case 32: offset = pos32; pos32 += 32; break;
case 64: offset = pos64; pos64 += 64; break;
default:
ASMJIT_NOT_REACHED();
}
varCell->setOffset(static_cast<int32_t>(offset));
varCell = varCell->_next;
}
// Stack - Allocated according to alignment/width.
while (stackCell != nullptr) {
uint32_t size = stackCell->getSize();
uint32_t alignment = stackCell->getAlignment();
uint32_t offset;
// Try to fill the gap between variables/stack first.
if (size <= gapSize && alignment <= gapAlignment) {
offset = gapPos;
gapSize -= size;
gapPos -= size;
if (alignment < gapAlignment)
gapAlignment = alignment;
}
else {
offset = stackPos;
stackPos += size;
allTotal += size;
}
stackCell->setOffset(offset);
stackCell = stackCell->_next;
}
_memAllTotal = allTotal;
return kErrorOk;
}
// ============================================================================
// [asmjit::Context - RemoveUnreachableCode]
// ============================================================================
Error Context::removeUnreachableCode() {
Compiler* compiler = getCompiler();
PodList<HLNode*>::Link* link = _unreachableList.getFirst();
HLNode* stop = getStop();
while (link != nullptr) {
HLNode* node = link->getValue();
if (node != nullptr && node->getPrev() != nullptr && node != stop) {
// Locate all unreachable nodes.
HLNode* first = node;
do {
if (node->isFetched())
break;
node = node->getNext();
} while (node != stop);
// Remove unreachable nodes that are neither informative nor directives.
if (node != first) {
HLNode* end = node;
node = first;
// NOTE: The strategy is as follows:
// 1. The algorithm removes everything until it finds a first label.
// 2. After the first label is found it removes only removable nodes.
bool removeEverything = true;
do {
HLNode* next = node->getNext();
bool remove = node->isRemovable();
if (!remove) {
if (node->isLabel())
removeEverything = false;
remove = removeEverything;
}
if (remove) {
ASMJIT_TSEC({
this->_traceNode(this, node, "[REMOVED UNREACHABLE] ");
});
compiler->removeNode(node);
}
node = next;
} while (node != end);
}
}
link = link->getNext();
}
return kErrorOk;
}
// ============================================================================
// [asmjit::Context - Liveness Analysis]
// ============================================================================
//! \internal
struct LivenessTarget {
//! Previous target.
LivenessTarget* prev;
//! Target node.
HLLabel* node;
//! Jumped from.
HLJump* from;
};
Error Context::livenessAnalysis() {
uint32_t bLen = static_cast<uint32_t>(
((_contextVd.getLength() + BitArray::kEntityBits - 1) / BitArray::kEntityBits));
// No variables.
if (bLen == 0)
return kErrorOk;
HLFunc* func = getFunc();
HLJump* from = nullptr;
LivenessTarget* ltCur = nullptr;
LivenessTarget* ltUnused = nullptr;
PodList<HLNode*>::Link* retPtr = _returningList.getFirst();
ASMJIT_ASSERT(retPtr != nullptr);
HLNode* node = retPtr->getValue();
size_t varMapToVaListOffset = _varMapToVaListOffset;
BitArray* bCur = newBits(bLen);
if (bCur == nullptr)
goto _NoMemory;
// Allocate bits for code visited first time.
_OnVisit:
for (;;) {
if (node->hasLiveness()) {
if (bCur->_addBitsDelSource(node->getLiveness(), bCur, bLen))
goto _OnPatch;
else
goto _OnDone;
}
BitArray* bTmp = copyBits(bCur, bLen);
if (bTmp == nullptr)
goto _NoMemory;
node->setLiveness(bTmp);
VarMap* map = node->getMap();
if (map != nullptr) {
uint32_t vaCount = map->getVaCount();
VarAttr* vaList = reinterpret_cast<VarAttr*>(((uint8_t*)map) + varMapToVaListOffset);
for (uint32_t i = 0; i < vaCount; i++) {
VarAttr* va = &vaList[i];
VarData* vd = va->getVd();
uint32_t flags = va->getFlags();
uint32_t localId = vd->getLocalId();
if ((flags & kVarAttrWAll) && !(flags & kVarAttrRAll)) {
// Write-Only.
bTmp->setBit(localId);
bCur->delBit(localId);
}
else {
// Read-Only or Read/Write.
bTmp->setBit(localId);
bCur->setBit(localId);
}
}
}
if (node->getType() == HLNode::kTypeLabel)
goto _OnTarget;
if (node == func)
goto _OnDone;
ASMJIT_ASSERT(node->getPrev());
node = node->getPrev();
}
// Patch already generated liveness bits.
_OnPatch:
for (;;) {
ASMJIT_ASSERT(node->hasLiveness());
BitArray* bNode = node->getLiveness();
if (!bNode->_addBitsDelSource(bCur, bLen))
goto _OnDone;
if (node->getType() == HLNode::kTypeLabel)
goto _OnTarget;
if (node == func)
goto _OnDone;
node = node->getPrev();
}
_OnTarget:
if (static_cast<HLLabel*>(node)->getNumRefs() != 0) {
// Push a new LivenessTarget onto the stack if needed.
if (ltCur == nullptr || ltCur->node != node) {
// Allocate a new LivenessTarget object (from pool or zone).
LivenessTarget* ltTmp = ltUnused;
if (ltTmp != nullptr) {
ltUnused = ltUnused->prev;
}
else {
ltTmp = _zoneAllocator.allocT<LivenessTarget>(
sizeof(LivenessTarget) - sizeof(BitArray) + bLen * sizeof(uintptr_t));
if (ltTmp == nullptr)
goto _NoMemory;
}
// Initialize and make current - ltTmp->from will be set later on.
ltTmp->prev = ltCur;
ltTmp->node = static_cast<HLLabel*>(node);
ltCur = ltTmp;
from = static_cast<HLLabel*>(node)->getFrom();
ASMJIT_ASSERT(from != nullptr);
}
else {
from = ltCur->from;
goto _OnJumpNext;
}
// Visit/Patch.
do {
ltCur->from = from;
bCur->copyBits(node->getLiveness(), bLen);
if (!from->hasLiveness()) {
node = from;
goto _OnVisit;
}
// Issue #25: Moved '_OnJumpNext' here since it's important to patch
// code again if there are more live variables than before.
_OnJumpNext:
if (bCur->delBits(from->getLiveness(), bLen)) {
node = from;
goto _OnPatch;
}
from = from->getJumpNext();
} while (from != nullptr);
// Pop the current LivenessTarget from the stack.
{
LivenessTarget* ltTmp = ltCur;
ltCur = ltCur->prev;
ltTmp->prev = ltUnused;
ltUnused = ltTmp;
}
}
bCur->copyBits(node->getLiveness(), bLen);
node = node->getPrev();
if (node->isJmp() || !node->isFetched())
goto _OnDone;
if (!node->hasLiveness())
goto _OnVisit;
if (bCur->delBits(node->getLiveness(), bLen))
goto _OnPatch;
_OnDone:
if (ltCur != nullptr) {
node = ltCur->node;
from = ltCur->from;
goto _OnJumpNext;
}
retPtr = retPtr->getNext();
if (retPtr != nullptr) {
node = retPtr->getValue();
goto _OnVisit;
}
return kErrorOk;
_NoMemory:
return setLastError(kErrorNoHeapMemory);
}
// ============================================================================
// [asmjit::Context - Annotate]
// ============================================================================
Error Context::formatInlineComment(StringBuilder& dst, HLNode* node) {
#if !defined(ASMJIT_DISABLE_LOGGER)
if (node->getComment())
dst.appendString(node->getComment());
if (node->hasLiveness()) {
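// Annotation layout (editorial note): one column per context variable is
// emitted between '[' and ']'. A '.' marks a variable that is live at this
// node; columns of variables referenced by the node's VarMap are then
// overwritten with 'r', 'w', 'x' (read, write, read/write) or 'u' when no
// access flag is set, uppercased when the node also unuses the variable.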
if (dst.getLength() < _annotationLength)
dst.appendChars(' ', _annotationLength - dst.getLength());
uint32_t vdCount = static_cast<uint32_t>(_contextVd.getLength());
size_t offset = dst.getLength() + 1;
dst.appendChar('[');
dst.appendChars(' ', vdCount);
dst.appendChar(']');
BitArray* liveness = node->getLiveness();
VarMap* map = node->getMap();
uint32_t i;
for (i = 0; i < vdCount; i++) {
if (liveness->getBit(i))
dst.getData()[offset + i] = '.';
}
if (map != nullptr) {
uint32_t vaCount = map->getVaCount();
VarAttr* vaList = reinterpret_cast<VarAttr*>(((uint8_t*)map) + _varMapToVaListOffset);
for (i = 0; i < vaCount; i++) {
VarAttr* va = &vaList[i];
VarData* vd = va->getVd();
uint32_t flags = va->getFlags();
char c = 'u';
if ( (flags & kVarAttrRAll) && !(flags & kVarAttrWAll)) c = 'r';
if (!(flags & kVarAttrRAll) && (flags & kVarAttrWAll)) c = 'w';
if ( (flags & kVarAttrRAll) && (flags & kVarAttrWAll)) c = 'x';
// Uppercase if unused.
if ((flags & kVarAttrUnuse))
c -= 'a' - 'A';
ASMJIT_ASSERT(offset + vd->getLocalId() < dst.getLength());
dst._data[offset + vd->getLocalId()] = c;
}
}
}
#endif // !ASMJIT_DISABLE_LOGGER
return kErrorOk;
}
// ============================================================================
// [asmjit::Context - Cleanup]
// ============================================================================
void Context::cleanup() {
VarData** array = _contextVd.getData();
size_t length = _contextVd.getLength();
for (size_t i = 0; i < length; i++) {
VarData* vd = array[i];
vd->resetLocalId();
vd->resetRegIndex();
}
_contextVd.reset(false);
_extraBlock = nullptr;
}
// ============================================================================
// [asmjit::Context - CompileFunc]
// ============================================================================
Error Context::compile(HLFunc* func) {
HLNode* end = func->getEnd();
HLNode* stop = end->getNext();
_func = func;
_stop = stop;
_extraBlock = end;
ASMJIT_PROPAGATE_ERROR(fetch());
ASMJIT_PROPAGATE_ERROR(removeUnreachableCode());
ASMJIT_PROPAGATE_ERROR(livenessAnalysis());
Compiler* compiler = getCompiler();
#if !defined(ASMJIT_DISABLE_LOGGER)
if (compiler->getAssembler()->hasLogger())
ASMJIT_PROPAGATE_ERROR(annotate());
#endif // !ASMJIT_DISABLE_LOGGER
ASMJIT_PROPAGATE_ERROR(translate());
// We alter the compiler cursor, because it doesn't make sense to reference
// it after compilation - some nodes may disappear and it's forbidden to add
// new code after the compilation is done.
compiler->_setCursor(nullptr);
return kErrorOk;
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER

View File

@@ -0,0 +1,901 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_COMPILERCONTEXT_P_H
#define _ASMJIT_BASE_COMPILERCONTEXT_P_H
#include "../build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/compiler.h"
#include "../base/podvector.h"
#include "../base/zone.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::VarAttrFlags]
// ============================================================================
//! \internal
//!
//! Variable attribute flags.
ASMJIT_ENUM(VarAttrFlags) {
//! Read from register.
kVarAttrRReg = 0x00000001,
//! Write to register.
kVarAttrWReg = 0x00000002,
//! Read/Write from/to register.
kVarAttrXReg = 0x00000003,
//! Read from memory.
kVarAttrRMem = 0x00000004,
//! Write to memory.
kVarAttrWMem = 0x00000008,
//! Read/Write from/to memory.
kVarAttrXMem = 0x0000000C,
//! Register allocator can decide if input will be in register or memory.
kVarAttrRDecide = 0x00000010,
//! Register allocator can decide if output will be in register or memory.
kVarAttrWDecide = 0x00000020,
//! Register allocator can decide if in/out will be in register or memory.
kVarAttrXDecide = 0x00000030,
//! Variable is converted to other type/class on the input.
kVarAttrRConv = 0x00000040,
//! Variable is converted from other type/class on the output.
kVarAttrWConv = 0x00000080,
//! Combination of `kVarAttrRConv` and `kVarAttrWConv`.
kVarAttrXConv = 0x000000C0,
//! Variable is a function call operand.
kVarAttrRCall = 0x00000100,
//! Variable is a function argument passed in register.
kVarAttrRFunc = 0x00000200,
//! Variable is a function return value passed in register.
kVarAttrWFunc = 0x00000400,
//! Variable should be spilled.
kVarAttrSpill = 0x00000800,
//! Variable should be unused at the end of the instruction/node.
kVarAttrUnuse = 0x00001000,
//! All in-flags.
kVarAttrRAll = kVarAttrRReg | kVarAttrRMem | kVarAttrRDecide | kVarAttrRCall | kVarAttrRFunc,
//! All out-flags.
kVarAttrWAll = kVarAttrWReg | kVarAttrWMem | kVarAttrWDecide | kVarAttrWFunc,
//! Variable is already allocated on the input.
kVarAttrAllocRDone = 0x00400000,
//! Variable is already allocated on the output.
kVarAttrAllocWDone = 0x00800000,
kVarAttrX86GpbLo = 0x10000000,
kVarAttrX86GpbHi = 0x20000000,
kVarAttrX86Fld4 = 0x40000000,
kVarAttrX86Fld8 = 0x80000000
};
// ============================================================================
// [asmjit::VarHint]
// ============================================================================
//! \internal
//!
//! Variable hint (used by `Compiler`).
//!
//! \sa Compiler.
ASMJIT_ENUM(VarHint) {
//! Alloc variable.
kVarHintAlloc = 0,
//! Spill variable.
kVarHintSpill = 1,
//! Save variable if modified.
kVarHintSave = 2,
//! Save variable if modified and mark it as unused.
kVarHintSaveAndUnuse = 3,
//! Mark variable as unused.
kVarHintUnuse = 4
};
// ============================================================================
// [asmjit::kVarState]
// ============================================================================
// TODO: Rename `kVarState` or `VarState`.
//! \internal
//!
//! State of variable.
//!
//! NOTE: Variable states are used only during register allocation.
ASMJIT_ENUM(kVarState) {
//! Variable is currently not used.
kVarStateNone = 0,
//! Variable is currently allocated in register.
kVarStateReg = 1,
//! Variable is currently allocated in memory (or has been spilled).
kVarStateMem = 2
};
// ============================================================================
// [asmjit::VarCell]
// ============================================================================
struct VarCell {
ASMJIT_NO_COPY(VarCell)
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get cell offset.
ASMJIT_INLINE int32_t getOffset() const { return _offset; }
//! Set cell offset.
ASMJIT_INLINE void setOffset(int32_t offset) { _offset = offset; }
//! Get cell size.
ASMJIT_INLINE uint32_t getSize() const { return _size; }
//! Set cell size.
ASMJIT_INLINE void setSize(uint32_t size) { _size = size; }
//! Get cell alignment.
ASMJIT_INLINE uint32_t getAlignment() const { return _alignment; }
//! Set cell alignment.
ASMJIT_INLINE void setAlignment(uint32_t alignment) { _alignment = alignment; }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Next active cell.
VarCell* _next;
//! Offset, relative to base-offset.
int32_t _offset;
//! Size.
uint32_t _size;
//! Alignment.
uint32_t _alignment;
};
// ============================================================================
// [asmjit::VarData]
// ============================================================================
//! HL variable data (base).
struct VarData {
// --------------------------------------------------------------------------
// [Accessors - Base]
// --------------------------------------------------------------------------
//! Get variable name.
ASMJIT_INLINE const char* getName() const { return _name; }
//! Get variable id.
ASMJIT_INLINE uint32_t getId() const { return _id; }
//! Get variable type.
ASMJIT_INLINE uint32_t getType() const { return _type; }
//! Get variable class.
ASMJIT_INLINE uint32_t getClass() const { return _class; }
// --------------------------------------------------------------------------
// [Accessors - LocalId]
// --------------------------------------------------------------------------
//! Get whether the variable has a local id.
ASMJIT_INLINE bool hasLocalId() const { return _localId != kInvalidValue; }
//! Get a variable's local id.
ASMJIT_INLINE uint32_t getLocalId() const { return _localId; }
//! Set a variable's local id.
ASMJIT_INLINE void setLocalId(uint32_t localId) { _localId = localId; }
//! Reset a variable's local id.
ASMJIT_INLINE void resetLocalId() { _localId = kInvalidValue; }
// --------------------------------------------------------------------------
// [Accessors - Priority]
// --------------------------------------------------------------------------
//! Get variable priority, used by compiler to decide which variable to spill.
ASMJIT_INLINE uint32_t getPriority() const { return _priority; }
//! Set variable priority.
ASMJIT_INLINE void setPriority(uint32_t priority) {
ASMJIT_ASSERT(priority <= 0xFF);
_priority = static_cast<uint8_t>(priority);
}
// --------------------------------------------------------------------------
// [Accessors - State]
// --------------------------------------------------------------------------
//! Get variable state, only used by `Context`.
ASMJIT_INLINE uint32_t getState() const { return _state; }
//! Set variable state, only used by `Context`.
ASMJIT_INLINE void setState(uint32_t state) {
ASMJIT_ASSERT(state <= 0xFF);
_state = static_cast<uint8_t>(state);
}
// --------------------------------------------------------------------------
// [Accessors - RegIndex]
// --------------------------------------------------------------------------
//! Get register index.
ASMJIT_INLINE uint32_t getRegIndex() const { return _regIndex; }
//! Set register index.
ASMJIT_INLINE void setRegIndex(uint32_t regIndex) {
ASMJIT_ASSERT(regIndex <= kInvalidReg);
_regIndex = static_cast<uint8_t>(regIndex);
}
//! Reset register index.
ASMJIT_INLINE void resetRegIndex() {
_regIndex = static_cast<uint8_t>(kInvalidReg);
}
// --------------------------------------------------------------------------
// [Accessors - HomeIndex/Mask]
// --------------------------------------------------------------------------
//! Get home registers mask.
ASMJIT_INLINE uint32_t getHomeMask() const { return _homeMask; }
//! Add a home register index to the home registers mask.
ASMJIT_INLINE void addHomeIndex(uint32_t regIndex) { _homeMask |= Utils::mask(regIndex); }
// --------------------------------------------------------------------------
// [Accessors - Flags]
// --------------------------------------------------------------------------
//! Get variable flags.
ASMJIT_INLINE uint32_t getFlags() const { return _flags; }
//! Get whether the VarData is only used as memory allocated on the stack.
ASMJIT_INLINE bool isStack() const { return static_cast<bool>(_isStack); }
//! Get whether the variable is a function argument passed through memory.
ASMJIT_INLINE bool isMemArg() const { return static_cast<bool>(_isMemArg); }
//! Get whether the variable content can be calculated by a simple instruction.
ASMJIT_INLINE bool isCalculated() const { return static_cast<bool>(_isCalculated); }
//! Get whether to save variable when it's unused (spill).
ASMJIT_INLINE bool saveOnUnuse() const { return static_cast<bool>(_saveOnUnuse); }
//! Get whether the variable was changed.
ASMJIT_INLINE bool isModified() const { return static_cast<bool>(_modified); }
//! Set whether the variable was changed.
ASMJIT_INLINE void setModified(bool modified) { _modified = modified; }
//! Get variable alignment.
ASMJIT_INLINE uint32_t getAlignment() const { return _alignment; }
//! Get variable size.
ASMJIT_INLINE uint32_t getSize() const { return _size; }
//! Get home memory offset.
ASMJIT_INLINE int32_t getMemOffset() const { return _memOffset; }
//! Set home memory offset.
ASMJIT_INLINE void setMemOffset(int32_t offset) { _memOffset = offset; }
//! Get home memory cell.
ASMJIT_INLINE VarCell* getMemCell() const { return _memCell; }
//! Set home memory cell.
ASMJIT_INLINE void setMemCell(VarCell* cell) { _memCell = cell; }
// --------------------------------------------------------------------------
// [Accessors - Temporary Usage]
// --------------------------------------------------------------------------
//! Get temporary VarAttr.
ASMJIT_INLINE VarAttr* getVa() const { return _va; }
//! Set temporary VarAttr.
ASMJIT_INLINE void setVa(VarAttr* va) { _va = va; }
//! Reset temporary VarAttr.
ASMJIT_INLINE void resetVa() { _va = nullptr; }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Variable name.
const char* _name;
//! Variable id.
uint32_t _id;
//! Variable's local id (initially `kInvalidValue`).
uint32_t _localId;
//! Variable type.
uint8_t _type;
//! Variable class.
uint8_t _class;
//! Variable flags.
uint8_t _flags;
//! Variable priority.
uint8_t _priority;
//! Variable state (connected with the actual `VarState`).
uint8_t _state;
//! Actual register index (only used by `Context` during translate).
uint8_t _regIndex;
//! Whether the variable is only used as memory allocated on the stack.
uint8_t _isStack : 1;
//! Whether the variable is a function argument passed through memory.
uint8_t _isMemArg : 1;
//! Whether variable content can be calculated by a simple instruction.
//!
//! This is used mainly by MMX and SSE2 code. This flag indicates that
//! register allocator should never reserve memory for this variable, because
//! the content can be generated by a single instruction (for example PXOR).
uint8_t _isCalculated : 1;
//! Save on unuse (at end of the variable scope).
uint8_t _saveOnUnuse : 1;
//! Whether the variable was changed (connected with the actual `VarState`).
uint8_t _modified : 1;
//! \internal
uint8_t _reserved0 : 3;
//! Variable natural alignment.
uint8_t _alignment;
//! Variable size.
uint32_t _size;
//! Mask of all registers variable has been allocated to.
uint32_t _homeMask;
//! Home memory offset.
int32_t _memOffset;
//! Home memory cell, used by `Context` (initially nullptr).
VarCell* _memCell;
//! Register read access statistics.
uint32_t rReadCount;
//! Register write access statistics.
uint32_t rWriteCount;
//! Memory read statistics.
uint32_t mReadCount;
//! Memory write statistics.
uint32_t mWriteCount;
// --------------------------------------------------------------------------
// [Members - Temporary Usage]
// --------------------------------------------------------------------------
// These variables are only used during register allocation. They are
// initialized by init() phase and reset by cleanup() phase.
union {
//! Temporary link to a VarAttr*, used by the `Context` in various phases and
//! always set back to nullptr when finished.
//!
//! This temporary data is designed to be used by algorithms that need to
//! store some data in the variables themselves during compilation, with the
//! expectation that after the variable is compiled & translated the data is
//! set back to zero/null. The initial value is nullptr.
VarAttr* _va;
//! \internal
//!
//! Same as `_va` just provided as `uintptr_t`.
uintptr_t _vaUInt;
};
};
// ============================================================================
// [asmjit::VarAttr]
// ============================================================================
struct VarAttr {
// --------------------------------------------------------------------------
// [Setup]
// --------------------------------------------------------------------------
ASMJIT_INLINE void setup(VarData* vd, uint32_t flags = 0, uint32_t inRegs = 0, uint32_t allocableRegs = 0) {
_vd = vd;
_flags = flags;
_varCount = 0;
_inRegIndex = kInvalidReg;
_outRegIndex = kInvalidReg;
_reserved = 0;
_inRegs = inRegs;
_allocableRegs = allocableRegs;
}
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get VarData.
ASMJIT_INLINE VarData* getVd() const { return _vd; }
//! Set VarData.
ASMJIT_INLINE void setVd(VarData* vd) { _vd = vd; }
//! Get flags.
ASMJIT_INLINE uint32_t getFlags() const { return _flags; }
//! Set flags.
ASMJIT_INLINE void setFlags(uint32_t flags) { _flags = flags; }
//! Get whether `flag` is on.
ASMJIT_INLINE bool hasFlag(uint32_t flag) { return (_flags & flag) != 0; }
//! Add `flags`.
ASMJIT_INLINE void orFlags(uint32_t flags) { _flags |= flags; }
//! Mask `flags`.
ASMJIT_INLINE void andFlags(uint32_t flags) { _flags &= flags; }
//! Clear `flags`.
ASMJIT_INLINE void andNotFlags(uint32_t flags) { _flags &= ~flags; }
//! Get how many times the variable is used by the instruction/node.
ASMJIT_INLINE uint32_t getVarCount() const { return _varCount; }
//! Set how many times the variable is used by the instruction/node.
ASMJIT_INLINE void setVarCount(uint32_t count) { _varCount = static_cast<uint8_t>(count); }
//! Add how many times the variable is used by the instruction/node.
ASMJIT_INLINE void addVarCount(uint32_t count = 1) { _varCount += static_cast<uint8_t>(count); }
//! Get whether the variable has to be allocated in a specific input register.
ASMJIT_INLINE uint32_t hasInRegIndex() const { return _inRegIndex != kInvalidReg; }
//! Get the input register index or `kInvalidReg`.
ASMJIT_INLINE uint32_t getInRegIndex() const { return _inRegIndex; }
//! Set the input register index.
ASMJIT_INLINE void setInRegIndex(uint32_t index) { _inRegIndex = static_cast<uint8_t>(index); }
//! Reset the input register index.
ASMJIT_INLINE void resetInRegIndex() { _inRegIndex = kInvalidReg; }
//! Get whether the variable has to be allocated in a specific output register.
ASMJIT_INLINE uint32_t hasOutRegIndex() const { return _outRegIndex != kInvalidReg; }
//! Get the output register index or `kInvalidReg`.
ASMJIT_INLINE uint32_t getOutRegIndex() const { return _outRegIndex; }
//! Set the output register index.
ASMJIT_INLINE void setOutRegIndex(uint32_t index) { _outRegIndex = static_cast<uint8_t>(index); }
//! Reset the output register index.
ASMJIT_INLINE void resetOutRegIndex() { _outRegIndex = kInvalidReg; }
//! Get whether the mandatory input registers are in use.
ASMJIT_INLINE bool hasInRegs() const { return _inRegs != 0; }
//! Get mandatory input registers (mask).
ASMJIT_INLINE uint32_t getInRegs() const { return _inRegs; }
//! Set mandatory input registers (mask).
ASMJIT_INLINE void setInRegs(uint32_t mask) { _inRegs = mask; }
//! Add mandatory input registers (mask).
ASMJIT_INLINE void addInRegs(uint32_t mask) { _inRegs |= mask; }
//! And mandatory input registers (mask).
ASMJIT_INLINE void andInRegs(uint32_t mask) { _inRegs &= mask; }
//! Clear mandatory input registers (mask).
ASMJIT_INLINE void delInRegs(uint32_t mask) { _inRegs &= ~mask; }
//! Get allocable input registers (mask).
ASMJIT_INLINE uint32_t getAllocableRegs() const { return _allocableRegs; }
//! Set allocable input registers (mask).
ASMJIT_INLINE void setAllocableRegs(uint32_t mask) { _allocableRegs = mask; }
//! Add allocable input registers (mask).
ASMJIT_INLINE void addAllocableRegs(uint32_t mask) { _allocableRegs |= mask; }
//! And allocable input registers (mask).
ASMJIT_INLINE void andAllocableRegs(uint32_t mask) { _allocableRegs &= mask; }
//! Clear allocable input registers (mask).
ASMJIT_INLINE void delAllocableRegs(uint32_t mask) { _allocableRegs &= ~mask; }
// --------------------------------------------------------------------------
// [Operator Overload]
// --------------------------------------------------------------------------
ASMJIT_INLINE VarAttr& operator=(const VarAttr& other) {
::memcpy(this, &other, sizeof(VarAttr));
return *this;
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
VarData* _vd;
//! Flags.
uint32_t _flags;
union {
struct {
//! How many times the variable is used by the instruction/node.
uint8_t _varCount;
//! Input register index or `kInvalidReg` if it's not given.
//!
//! Even if the input register index is not given (i.e. it may be any
//! register), the register allocator should assign an index that will be
//! used to keep the variable in that specific register. It's helpful in
//! situations where one variable has to be allocated in multiple registers
//! to determine which register is the persistent one.
uint8_t _inRegIndex;
//! Output register index or `kInvalidReg` if it's not given.
//!
//! Typically `kInvalidReg` if variable is only used on input.
uint8_t _outRegIndex;
//! \internal
uint8_t _reserved;
};
//! \internal
//!
//! Packed data #0.
uint32_t _packed;
};
//! Mandatory input registers.
//!
//! Mandatory input registers are required by the instruction even if
//! there are duplicates. This schema allows us to allocate one variable
//! in one or more register when needed. Required mostly by instructions
//! that have implicit register operands (imul, cpuid, ...) and function
//! call.
uint32_t _inRegs;
//! Allocable input registers.
//!
//! Optional input registers are a mask of all allocable registers for a given
//! variable, of which one has to be picked. This mask is usually not used
//! when _inRegs is set. If both masks are used then the register allocator
//! first tries to find an intersection between them and allocates an extra
//! slot if none is found.
uint32_t _allocableRegs;
};
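// Editorial sketch (not part of the original source): an instruction with an
// implicit register operand would have its variable pinned by the fetch phase
// roughly like this. Register index 0 standing for EAX is an assumption of
// this example, and `vd` is a previously created VarData*.
//
//   VarAttr va;
//   va.setup(vd, kVarAttrXReg,
//            Utils::mask(0),  // mandatory input register mask (e.g. EAX)
//            0);              // no extra allocable-register mask needed here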
// ============================================================================
// [asmjit::VarMap]
// ============================================================================
//! Variables' map related to a single node (instruction / other node).
struct VarMap {
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get count of variables (all).
ASMJIT_INLINE uint32_t getVaCount() const {
return _vaCount;
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Variables count.
uint32_t _vaCount;
};
// ============================================================================
// [asmjit::VarState]
// ============================================================================
//! Variables' state.
struct VarState {};
// ============================================================================
// [asmjit::Context]
// ============================================================================
//! \internal
//!
//! Code generation context is the logic behind `Compiler`. The context is
//! used to compile the code stored in `Compiler`.
struct Context {
ASMJIT_NO_COPY(Context)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
Context(Compiler* compiler);
virtual ~Context();
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
//! Reset the whole context.
virtual void reset(bool releaseMemory = false);
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get compiler.
ASMJIT_INLINE Compiler* getCompiler() const { return _compiler; }
//! Get function.
ASMJIT_INLINE HLFunc* getFunc() const { return _func; }
//! Get stop node.
ASMJIT_INLINE HLNode* getStop() const { return _stop; }
//! Get start of the current scope.
ASMJIT_INLINE HLNode* getStart() const { return _start; }
//! Get end of the current scope.
ASMJIT_INLINE HLNode* getEnd() const { return _end; }
//! Get extra block.
ASMJIT_INLINE HLNode* getExtraBlock() const { return _extraBlock; }
//! Set extra block.
ASMJIT_INLINE void setExtraBlock(HLNode* node) { _extraBlock = node; }
// --------------------------------------------------------------------------
// [Error]
// --------------------------------------------------------------------------
//! Get the last error code.
ASMJIT_INLINE Error getLastError() const {
return getCompiler()->getLastError();
}
//! Set the last error code and propagate it through the error handler.
ASMJIT_INLINE Error setLastError(Error error, const char* message = nullptr) {
return getCompiler()->setLastError(error, message);
}
// --------------------------------------------------------------------------
// [State]
// --------------------------------------------------------------------------
//! Get current state.
ASMJIT_INLINE VarState* getState() const { return _state; }
//! Load the current state from the `src` state.
virtual void loadState(VarState* src) = 0;
//! Save current state, returning new `VarState` instance.
virtual VarState* saveState() = 0;
//! Change the current state to the `src` state.
virtual void switchState(VarState* src) = 0;
//! Change the current state to the intersection of two states `a` and `b`.
virtual void intersectStates(VarState* a, VarState* b) = 0;
// --------------------------------------------------------------------------
// [Context]
// --------------------------------------------------------------------------
ASMJIT_INLINE Error _registerContextVar(VarData* vd) {
if (vd->hasLocalId())
return kErrorOk;
uint32_t cid = static_cast<uint32_t>(_contextVd.getLength());
ASMJIT_PROPAGATE_ERROR(_contextVd.append(vd));
vd->setLocalId(cid);
return kErrorOk;
}
// --------------------------------------------------------------------------
// [Mem]
// --------------------------------------------------------------------------
VarCell* _newVarCell(VarData* vd);
VarCell* _newStackCell(uint32_t size, uint32_t alignment);
ASMJIT_INLINE VarCell* getVarCell(VarData* vd) {
VarCell* cell = vd->getMemCell();
return cell ? cell : _newVarCell(vd);
}
virtual Error resolveCellOffsets();
// --------------------------------------------------------------------------
// [Bits]
// --------------------------------------------------------------------------
ASMJIT_INLINE BitArray* newBits(uint32_t len) {
return static_cast<BitArray*>(
_zoneAllocator.allocZeroed(static_cast<size_t>(len) * BitArray::kEntitySize));
}
ASMJIT_INLINE BitArray* copyBits(const BitArray* src, uint32_t len) {
return static_cast<BitArray*>(
_zoneAllocator.dup(src, static_cast<size_t>(len) * BitArray::kEntitySize));
}
// --------------------------------------------------------------------------
// [Fetch]
// --------------------------------------------------------------------------
//! Fetch.
//!
//! Fetch iterates over all nodes and gathers information about all variables
//! used. The process generates information required by the register allocator,
//! variable liveness analysis, and the translator.
virtual Error fetch() = 0;
// --------------------------------------------------------------------------
// [Unreachable Code]
// --------------------------------------------------------------------------
//! Add unreachable-flow data to the unreachable flow list.
ASMJIT_INLINE Error addUnreachableNode(HLNode* node) {
PodList<HLNode*>::Link* link = _zoneAllocator.allocT<PodList<HLNode*>::Link>();
if (link == nullptr)
return setLastError(kErrorNoHeapMemory);
link->setValue(node);
_unreachableList.append(link);
return kErrorOk;
}
//! Remove unreachable code.
virtual Error removeUnreachableCode();
// --------------------------------------------------------------------------
// [Code-Flow]
// --------------------------------------------------------------------------
//! Add returning node (i.e. node that returns and where liveness analysis
//! should start).
ASMJIT_INLINE Error addReturningNode(HLNode* node) {
PodList<HLNode*>::Link* link = _zoneAllocator.allocT<PodList<HLNode*>::Link>();
if (link == nullptr)
return setLastError(kErrorNoHeapMemory);
link->setValue(node);
_returningList.append(link);
return kErrorOk;
}
//! Add jump-flow data to the jcc flow list.
ASMJIT_INLINE Error addJccNode(HLNode* node) {
PodList<HLNode*>::Link* link = _zoneAllocator.allocT<PodList<HLNode*>::Link>();
if (link == nullptr)
return setLastError(kErrorNoHeapMemory);
link->setValue(node);
_jccList.append(link);
return kErrorOk;
}
// --------------------------------------------------------------------------
// [Analyze]
// --------------------------------------------------------------------------
//! Perform variable liveness analysis.
//!
//! The analysis phase iterates over nodes in reverse order and generates a bit
//! array describing the variables that are alive at every node in the function.
//! When the analysis starts, all variables are assumed dead. When a read or
//! read/write operation on a variable is detected, the variable becomes
//! alive; when only a write operation is detected, the variable becomes dead.
//!
//! When a label is found, all jumps to that label are followed and the analysis
//! repeats until all variables are resolved.
virtual Error livenessAnalysis();
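// Editorial sketch of the propagation the doc-comment above describes (not
// part of the original source). Walking backwards from every returning node,
// the set propagated to the previous node is roughly:
//
//   live(prev) = (live(node) & ~writeOnly(node)) | read(node)
//
// and each referenced label re-enters the walk through every jump that
// targets it, repeating until no liveness bit-array changes.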
// --------------------------------------------------------------------------
// [Annotate]
// --------------------------------------------------------------------------
virtual Error annotate() = 0;
virtual Error formatInlineComment(StringBuilder& dst, HLNode* node);
// --------------------------------------------------------------------------
// [Translate]
// --------------------------------------------------------------------------
//! Translate code by allocating registers and handling state changes.
virtual Error translate() = 0;
// --------------------------------------------------------------------------
// [Cleanup]
// --------------------------------------------------------------------------
virtual void cleanup();
// --------------------------------------------------------------------------
// [Compile]
// --------------------------------------------------------------------------
virtual Error compile(HLFunc* func);
// --------------------------------------------------------------------------
// [Serialize]
// --------------------------------------------------------------------------
virtual Error serialize(Assembler* assembler, HLNode* start, HLNode* stop) = 0;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Compiler.
Compiler* _compiler;
//! Function.
HLFunc* _func;
//! Zone allocator.
Zone _zoneAllocator;
//! \internal
typedef void (ASMJIT_CDECL* TraceNodeFunc)(Context* self, HLNode* node_, const char* prefix);
//! \internal
//!
//! Only non-NULL when ASMJIT_TRACE is enabled.
TraceNodeFunc _traceNode;
//! \internal
//!
//! Offset (how many bytes to add) to a `VarMap` to get its `VarAttr` array.
//! Used by the liveness analysis shared across all backends. This is needed
//! because `VarMap` is a base class for a specialized version that liveness
//! analysis doesn't use; it just needs the `VarAttr` array.
uint32_t _varMapToVaListOffset;
//! Start of the current active scope.
HLNode* _start;
//! End of the current active scope.
HLNode* _end;
//! Node that is used to insert extra code after the function body.
HLNode* _extraBlock;
//! Stop node.
HLNode* _stop;
//! Unreachable nodes.
PodList<HLNode*> _unreachableList;
//! Returning nodes.
PodList<HLNode*> _returningList;
//! Jump nodes.
PodList<HLNode*> _jccList;
//! All variables used by the current function.
PodVector<VarData*> _contextVd;
//! Memory used to spill variables.
VarCell* _memVarCells;
//! Memory used to alloc memory on the stack.
VarCell* _memStackCells;
//! Count of 1-byte cells.
uint32_t _mem1ByteVarsUsed;
//! Count of 2-byte cells.
uint32_t _mem2ByteVarsUsed;
//! Count of 4-byte cells.
uint32_t _mem4ByteVarsUsed;
//! Count of 8-byte cells.
uint32_t _mem8ByteVarsUsed;
//! Count of 16-byte cells.
uint32_t _mem16ByteVarsUsed;
//! Count of 32-byte cells.
uint32_t _mem32ByteVarsUsed;
//! Count of 64-byte cells.
uint32_t _mem64ByteVarsUsed;
//! Count of stack memory cells.
uint32_t _memStackCellsUsed;
//! Maximum memory alignment used by the function.
uint32_t _memMaxAlign;
//! Count of bytes used by variables.
uint32_t _memVarTotal;
//! Count of bytes used by stack.
uint32_t _memStackTotal;
//! Count of bytes used by variables and stack after alignment.
uint32_t _memAllTotal;
//! Default length of an annotated instruction.
uint32_t _annotationLength;
//! Current state (used by register allocator).
VarState* _state;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER
#endif // _ASMJIT_BASE_COMPILERCONTEXT_P_H

View File

@@ -0,0 +1,679 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_COMPILERFUNC_H
#define _ASMJIT_BASE_COMPILERFUNC_H
#include "../build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/operand.h"
#include "../base/utils.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::FuncHint]
// ============================================================================
//! Function hints.
//!
//! For platform-specific calling conventions, see:
//! - `X86FuncHint` - X86/X64 function hints.
ASMJIT_ENUM(FuncHint) {
//! Generate a naked function by omitting its prolog and epilog (default true).
//!
//! Naked functions should always result in less code required for the function's
//! prolog and epilog. In addition, on X86/64 naked functions save one register
//! (ebp or rbp), which can be used by the function instead.
kFuncHintNaked = 0,
//! Generate a compact function prolog/epilog if possible (default true).
//!
//! X86/X64 Specific
//! ----------------
//!
//! Use a shorter, but possibly slower, prolog/epilog sequence to save/restore
//! registers. At the moment this only enables emitting `leave` in function's
//! epilog to make the code shorter, however, the counterpart `enter` is not
//! used in function's prolog for performance reasons.
kFuncHintCompact = 1,
//! Emit `emms` instruction in the function's epilog.
kFuncHintX86Emms = 17,
//! Emit `sfence` instruction in the function's epilog.
kFuncHintX86SFence = 18,
//! Emit `lfence` instruction in the function's epilog.
kFuncHintX86LFence = 19
};
// ============================================================================
// [asmjit::FuncFlags]
// ============================================================================
//! Function flags.
ASMJIT_ENUM(FuncFlags) {
//! Whether the function is using naked (minimal) prolog / epilog.
kFuncFlagIsNaked = 0x00000001,
//! Whether another function is called from this function.
kFuncFlagIsCaller = 0x00000002,
//! Whether the stack is not aligned to the required stack alignment and
//! thus has to be aligned manually.
kFuncFlagIsStackMisaligned = 0x00000004,
//! Whether the stack pointer is adjusted by the stack size needed
//! to save registers and function variables.
//!
//! X86/X64 Specific
//! ----------------
//!
//! The stack pointer (ESP/RSP) is adjusted by a 'sub' instruction in the prolog
//! and by an 'add' instruction in the epilog (only if the function is not
//! naked). If the function needs to perform manual stack alignment, more
//! instructions are used to adjust the stack (like "and zsp, -Alignment").
kFuncFlagIsStackAdjusted = 0x00000008,
//! Whether the function is finished using `Compiler::endFunc()`.
kFuncFlagIsFinished = 0x80000000,
//! Whether to emit `leave` instead of two instructions when the function
//! saves and restores the frame pointer.
kFuncFlagX86Leave = 0x00010000,
//! Whether it's required to move arguments to a new stack location because
//! of manual stack alignment.
kFuncFlagX86MoveArgs = 0x00040000,
//! Whether to emit `emms` instruction in epilog (auto-detected).
kFuncFlagX86Emms = 0x01000000,
//! Whether to emit `sfence` instruction in epilog (auto-detected).
//!
//! `kFuncFlagX86SFence` with `kFuncFlagX86LFence` results in emitting `mfence`.
kFuncFlagX86SFence = 0x02000000,
//! Whether to emit `lfence` instruction in epilog (auto-detected).
//!
//! `kFuncFlagX86SFence` with `kFuncFlagX86LFence` results in emitting `mfence`.
kFuncFlagX86LFence = 0x04000000
};
// ============================================================================
// [asmjit::FuncDir]
// ============================================================================
//! Function arguments direction.
ASMJIT_ENUM(FuncDir) {
//! Arguments are passed left to right.
//!
//! This argument direction is unusual in C; however, it's used in Pascal.
kFuncDirLTR = 0,
//! Arguments are passed right to left.
//!
//! This is the default argument direction in C.
kFuncDirRTL = 1
};
// ============================================================================
// [asmjit::FuncMisc]
// ============================================================================
enum {
//! Function doesn't have variable number of arguments (`...`) (default).
kFuncNoVarArgs = 0xFF,
//! Invalid stack offset in function or function parameter.
kFuncStackInvalid = -1
};
// ============================================================================
// [asmjit::FuncArgIndex]
// ============================================================================
//! Function argument index (lo/hi).
ASMJIT_ENUM(FuncArgIndex) {
//! Maximum number of function arguments supported by AsmJit.
kFuncArgCount = 16,
//! Extended maximum number of arguments (used internally).
kFuncArgCountLoHi = kFuncArgCount * 2,
//! Index to the LO part of function argument (default).
//!
//! This value is typically omitted and added only if the HI part of the
//! argument is accessed.
kFuncArgLo = 0,
//! Index to the HI part of function argument.
//!
//! The HI part of a function argument depends on the target architecture. On
//! x86 it's typically used to transfer 64-bit integers (they form a pair of
//! 32-bit integers).
kFuncArgHi = kFuncArgCount
};
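// Editorial illustration: on a 32-bit x86 target a 64-bit argument passed at
// index `i` occupies two slots in the lo/hi argument array: slot `i` for the
// low 32 bits and slot `i + kFuncArgHi` for the high 32 bits; 64-bit targets
// normally leave the HI slots unused.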
// ============================================================================
// [asmjit::FuncRet]
// ============================================================================
//! Function return value (lo/hi) specification.
ASMJIT_ENUM(FuncRet) {
//! Index to the LO part of function return value.
kFuncRetLo = 0,
//! Index to the HI part of function return value.
kFuncRetHi = 1
};
// ============================================================================
// [asmjit::TypeId]
// ============================================================================
//! Function builder's `void` type.
struct Void {};
//! Function builder's `int8_t` type.
struct Int8Type {};
//! Function builder's `uint8_t` type.
struct UInt8Type {};
//! Function builder's `int16_t` type.
struct Int16Type {};
//! Function builder's `uint16_t` type.
struct UInt16Type {};
//! Function builder's `int32_t` type.
struct Int32Type {};
//! Function builder's `uint32_t` type.
struct UInt32Type {};
//! Function builder's `int64_t` type.
struct Int64Type {};
//! Function builder's `uint64_t` type.
struct UInt64Type {};
//! Function builder's `intptr_t` type.
struct IntPtrType {};
//! Function builder's `uintptr_t` type.
struct UIntPtrType {};
//! Function builder's `float` type.
struct FloatType {};
//! Function builder's `double` type.
struct DoubleType {};
#if !defined(ASMJIT_DOCGEN)
template<typename T>
struct TypeId {
// Let it fail here if `T` was not specialized.
};
template<typename T>
struct TypeId<T*> {
enum { kId = kVarTypeIntPtr };
};
template<typename T>
struct TypeIdOfInt {
enum { kId = (sizeof(T) == 1) ? (int)(IntTraits<T>::kIsSigned ? kVarTypeInt8 : kVarTypeUInt8 ) :
(sizeof(T) == 2) ? (int)(IntTraits<T>::kIsSigned ? kVarTypeInt16 : kVarTypeUInt16) :
(sizeof(T) == 4) ? (int)(IntTraits<T>::kIsSigned ? kVarTypeInt32 : kVarTypeUInt32) :
(sizeof(T) == 8) ? (int)(IntTraits<T>::kIsSigned ? kVarTypeInt64 : kVarTypeUInt64) : (int)kInvalidVar
};
};
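// Editorial note: the mapping below dispatches purely on `sizeof(T)` and
// signedness, so e.g. `TypeId<long>::kId` resolves to `kVarTypeInt64` on an
// LP64 target but to `kVarTypeInt32` on 32-bit and LLP64 (Win64) targets.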
#define ASMJIT_TYPE_ID(T, ID) \
template<> struct TypeId<T> { enum { kId = ID }; }
ASMJIT_TYPE_ID(void , kInvalidVar);
ASMJIT_TYPE_ID(signed char , TypeIdOfInt<signed char>::kId);
ASMJIT_TYPE_ID(unsigned char , TypeIdOfInt<unsigned char>::kId);
ASMJIT_TYPE_ID(short , TypeIdOfInt<short>::kId);
ASMJIT_TYPE_ID(unsigned short , TypeIdOfInt<unsigned short>::kId);
ASMJIT_TYPE_ID(int , TypeIdOfInt<int>::kId);
ASMJIT_TYPE_ID(unsigned int , TypeIdOfInt<unsigned int>::kId);
ASMJIT_TYPE_ID(long , TypeIdOfInt<long>::kId);
ASMJIT_TYPE_ID(unsigned long , TypeIdOfInt<unsigned long>::kId);
ASMJIT_TYPE_ID(float , kVarTypeFp32);
ASMJIT_TYPE_ID(double , kVarTypeFp64);
#if ASMJIT_CC_HAS_NATIVE_CHAR
ASMJIT_TYPE_ID(char , TypeIdOfInt<char>::kId);
#endif
#if ASMJIT_CC_HAS_NATIVE_WCHAR_T
ASMJIT_TYPE_ID(wchar_t , TypeIdOfInt<wchar_t>::kId);
#endif
#if ASMJIT_CC_HAS_NATIVE_CHAR16_T
ASMJIT_TYPE_ID(char16_t , TypeIdOfInt<char16_t>::kId);
#endif
#if ASMJIT_CC_HAS_NATIVE_CHAR32_T
ASMJIT_TYPE_ID(char32_t , TypeIdOfInt<char32_t>::kId);
#endif
#if ASMJIT_CC_MSC && !ASMJIT_CC_MSC_GE(16, 0, 0)
ASMJIT_TYPE_ID(__int64 , TypeIdOfInt<__int64>::kId);
ASMJIT_TYPE_ID(unsigned __int64 , TypeIdOfInt<unsigned __int64>::kId);
#else
ASMJIT_TYPE_ID(long long , TypeIdOfInt<long long>::kId);
ASMJIT_TYPE_ID(unsigned long long, TypeIdOfInt<unsigned long long>::kId);
#endif
ASMJIT_TYPE_ID(Void , kInvalidVar);
ASMJIT_TYPE_ID(Int8Type , kVarTypeInt8);
ASMJIT_TYPE_ID(UInt8Type , kVarTypeUInt8);
ASMJIT_TYPE_ID(Int16Type , kVarTypeInt16);
ASMJIT_TYPE_ID(UInt16Type , kVarTypeUInt16);
ASMJIT_TYPE_ID(Int32Type , kVarTypeInt32);
ASMJIT_TYPE_ID(UInt32Type , kVarTypeUInt32);
ASMJIT_TYPE_ID(Int64Type , kVarTypeInt64);
ASMJIT_TYPE_ID(UInt64Type , kVarTypeUInt64);
ASMJIT_TYPE_ID(IntPtrType , kVarTypeIntPtr);
ASMJIT_TYPE_ID(UIntPtrType , kVarTypeUIntPtr);
ASMJIT_TYPE_ID(FloatType , kVarTypeFp32);
ASMJIT_TYPE_ID(DoubleType , kVarTypeFp64);
#endif // !ASMJIT_DOCGEN
// ============================================================================
// [asmjit::FuncInOut]
// ============================================================================
//! Function in/out - argument or return value translated from `FuncPrototype`.
struct FuncInOut {
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
ASMJIT_INLINE uint32_t getVarType() const noexcept { return _varType; }
ASMJIT_INLINE bool hasRegIndex() const noexcept { return _regIndex != kInvalidReg; }
ASMJIT_INLINE uint32_t getRegIndex() const noexcept { return _regIndex; }
ASMJIT_INLINE bool hasStackOffset() const noexcept { return _stackOffset != kFuncStackInvalid; }
ASMJIT_INLINE int32_t getStackOffset() const noexcept { return static_cast<int32_t>(_stackOffset); }
//! Get whether the argument / return value is assigned.
ASMJIT_INLINE bool isSet() const noexcept {
return (_regIndex != kInvalidReg) | (_stackOffset != kFuncStackInvalid);
}
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
//! Reset the function argument to "unassigned state".
ASMJIT_INLINE void reset() noexcept { _packed = 0xFFFFFFFFU; }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
union {
struct {
//! Variable type, see \ref VarType.
uint8_t _varType;
//! Register index if argument / return value is a register.
uint8_t _regIndex;
//! Stack offset if argument / return value is on the stack.
int16_t _stackOffset;
};
//! All members packed into single 32-bit integer.
uint32_t _packed;
};
};
// ============================================================================
// [asmjit::FuncPrototype]
// ============================================================================
//! Function prototype.
//!
//! A function prototype contains information about the function's return type
//! and the number and types of its arguments. It's a low-level structure that
//! doesn't contain platform-specific or calling-convention-specific details.
//! A function prototype is used to create a `FuncDecl`.
struct FuncPrototype {
// --------------------------------------------------------------------------
// [Setup]
// --------------------------------------------------------------------------
//! Setup the prototype.
ASMJIT_INLINE void setup(
uint32_t callConv,
uint32_t ret,
const uint32_t* args, uint32_t numArgs) noexcept {
ASMJIT_ASSERT(callConv <= 0xFF);
ASMJIT_ASSERT(numArgs <= 0xFF);
_callConv = static_cast<uint8_t>(callConv);
_varArgs = kFuncNoVarArgs;
_numArgs = static_cast<uint8_t>(numArgs);
_reserved = 0;
_ret = ret;
_args = args;
}
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get the function's calling convention.
ASMJIT_INLINE uint32_t getCallConv() const noexcept { return _callConv; }
//! Get the variable arguments `...` index, `kFuncNoVarArgs` if none.
ASMJIT_INLINE uint32_t getVarArgs() const noexcept { return _varArgs; }
//! Get the number of function arguments.
ASMJIT_INLINE uint32_t getNumArgs() const noexcept { return _numArgs; }
//! Get the return value type.
ASMJIT_INLINE uint32_t getRet() const noexcept { return _ret; }
//! Get the type of the argument at index `i`.
ASMJIT_INLINE uint32_t getArg(uint32_t i) const noexcept {
ASMJIT_ASSERT(i < _numArgs);
return _args[i];
}
//! Get the array of function arguments' types.
ASMJIT_INLINE const uint32_t* getArgs() const noexcept { return _args; }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
uint8_t _callConv;
uint8_t _varArgs;
uint8_t _numArgs;
uint8_t _reserved;
uint32_t _ret;
const uint32_t* _args;
};
// ============================================================================
// [asmjit::FuncBuilderX]
// ============================================================================
// TODO: Rename to `DynamicFuncBuilder`
//! Custom function builder for up to 32 function arguments.
struct FuncBuilderX : public FuncPrototype {
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_INLINE FuncBuilderX(uint32_t callConv = kCallConvHost) noexcept {
setup(callConv, kInvalidVar, _builderArgList, 0);
}
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
ASMJIT_INLINE void setCallConv(uint32_t callConv) noexcept {
ASMJIT_ASSERT(callConv <= 0xFF);
_callConv = static_cast<uint8_t>(callConv);
}
//! Set the return type to `retType`.
ASMJIT_INLINE void setRet(uint32_t retType) noexcept {
_ret = retType;
}
//! Set the return type based on `T`.
template<typename T>
ASMJIT_INLINE void setRetT() noexcept { setRet(TypeId<T>::kId); }
  //! Set the argument at index `i` to `type`.
ASMJIT_INLINE void setArg(uint32_t i, uint32_t type) noexcept {
ASMJIT_ASSERT(i < _numArgs);
_builderArgList[i] = type;
}
//! Set the argument at index `i` to the type based on `T`.
template<typename T>
ASMJIT_INLINE void setArgT(uint32_t i) noexcept { setArg(i, TypeId<T>::kId); }
//! Append an argument of `type` to the function prototype.
ASMJIT_INLINE void addArg(uint32_t type) noexcept {
ASMJIT_ASSERT(_numArgs < kFuncArgCount);
_builderArgList[_numArgs++] = type;
}
//! Append an argument of type based on `T` to the function prototype.
template<typename T>
ASMJIT_INLINE void addArgT() noexcept { addArg(TypeId<T>::kId); }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
uint32_t _builderArgList[kFuncArgCount];
};
//! \internal
#define T(_Type_) TypeId<_Type_>::kId
//! Function prototype (no args).
template<typename RET>
struct FuncBuilder0 : public FuncPrototype {
ASMJIT_INLINE FuncBuilder0(uint32_t callConv = kCallConvHost) noexcept {
setup(callConv, T(RET), nullptr, 0);
}
};
//! Function prototype (1 argument).
template<typename RET, typename P0>
struct FuncBuilder1 : public FuncPrototype {
ASMJIT_INLINE FuncBuilder1(uint32_t callConv = kCallConvHost) noexcept {
static const uint32_t args[] = { T(P0) };
setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
}
};
//! Function prototype (2 arguments).
template<typename RET, typename P0, typename P1>
struct FuncBuilder2 : public FuncPrototype {
ASMJIT_INLINE FuncBuilder2(uint32_t callConv = kCallConvHost) noexcept {
static const uint32_t args[] = { T(P0), T(P1) };
setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
}
};
//! Function prototype (3 arguments).
template<typename RET, typename P0, typename P1, typename P2>
struct FuncBuilder3 : public FuncPrototype {
ASMJIT_INLINE FuncBuilder3(uint32_t callConv = kCallConvHost) noexcept {
static const uint32_t args[] = { T(P0), T(P1), T(P2) };
setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
}
};
//! Function prototype (4 arguments).
template<typename RET, typename P0, typename P1, typename P2, typename P3>
struct FuncBuilder4 : public FuncPrototype {
ASMJIT_INLINE FuncBuilder4(uint32_t callConv = kCallConvHost) noexcept {
static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3) };
setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
}
};
//! Function prototype (5 arguments).
template<typename RET, typename P0, typename P1, typename P2, typename P3, typename P4>
struct FuncBuilder5 : public FuncPrototype {
ASMJIT_INLINE FuncBuilder5(uint32_t callConv = kCallConvHost) noexcept {
static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4) };
setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
}
};
//! Function prototype (6 arguments).
template<typename RET, typename P0, typename P1, typename P2, typename P3, typename P4, typename P5>
struct FuncBuilder6 : public FuncPrototype {
ASMJIT_INLINE FuncBuilder6(uint32_t callConv = kCallConvHost) noexcept {
static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5) };
setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
}
};
//! Function prototype (7 arguments).
template<typename RET, typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6>
struct FuncBuilder7 : public FuncPrototype {
ASMJIT_INLINE FuncBuilder7(uint32_t callConv = kCallConvHost) noexcept {
static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5), T(P6) };
setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
}
};
//! Function prototype (8 arguments).
template<typename RET, typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7>
struct FuncBuilder8 : public FuncPrototype {
ASMJIT_INLINE FuncBuilder8(uint32_t callConv = kCallConvHost) noexcept {
static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5), T(P6), T(P7) };
setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
}
};
//! Function prototype (9 arguments).
template<typename RET, typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8>
struct FuncBuilder9 : public FuncPrototype {
ASMJIT_INLINE FuncBuilder9(uint32_t callConv = kCallConvHost) noexcept {
static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5), T(P6), T(P7), T(P8) };
setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
}
};
//! Function prototype (10 arguments).
template<typename RET, typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9>
struct FuncBuilder10 : public FuncPrototype {
ASMJIT_INLINE FuncBuilder10(uint32_t callConv = kCallConvHost) noexcept {
static const uint32_t args[] = { T(P0), T(P1), T(P2), T(P3), T(P4), T(P5), T(P6), T(P7), T(P8), T(P9) };
setup(callConv, T(RET), args, ASMJIT_ARRAY_SIZE(args));
}
};
#undef T
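// Usage sketch (illustration only): describe `int func(int, float)` with both
// the typed and the dynamic builder declared above. Nothing architecture
// specific is assumed; `kCallConvHost` is the same default the builders use.
static ASMJIT_INLINE void FuncPrototype_usageSketch() noexcept {
  // Fixed arity, argument types known at compile time.
  FuncBuilder2<int, int, float> typed(kCallConvHost);
  // Arity and argument types decided at runtime.
  FuncBuilderX dynamic(kCallConvHost);
  dynamic.setRetT<int>();
  dynamic.addArgT<int>();
  dynamic.addArgT<float>();
  ASMJIT_ASSERT(typed.getNumArgs() == dynamic.getNumArgs());
  (void)typed;
  (void)dynamic;
}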
// ============================================================================
// [asmjit::FuncDecl]
// ============================================================================
//! Function declaration.
struct FuncDecl {
// --------------------------------------------------------------------------
// [Accessors - Calling Convention]
// --------------------------------------------------------------------------
//! Get the function's calling convention, see `CallConv`.
ASMJIT_INLINE uint32_t getCallConv() const noexcept { return _callConv; }
//! Get whether the callee pops the stack.
ASMJIT_INLINE uint32_t getCalleePopsStack() const noexcept { return _calleePopsStack; }
//! Get direction of arguments passed on the stack.
//!
  //! Direction should always be `kFuncDirRTL`.
  //!
  //! NOTE: This is determined by the calling convention used; it's not affected
  //! by the number of function arguments or their types.
ASMJIT_INLINE uint32_t getArgsDirection() const noexcept { return _argsDirection; }
//! Get stack size needed for function arguments passed on the stack.
ASMJIT_INLINE uint32_t getArgStackSize() const noexcept { return _argStackSize; }
//! Get size of "Red Zone".
ASMJIT_INLINE uint32_t getRedZoneSize() const noexcept { return _redZoneSize; }
//! Get size of "Spill Zone".
ASMJIT_INLINE uint32_t getSpillZoneSize() const noexcept { return _spillZoneSize; }
// --------------------------------------------------------------------------
// [Accessors - Arguments and Return]
// --------------------------------------------------------------------------
//! Get whether the function has a return value.
ASMJIT_INLINE bool hasRet() const noexcept { return _retCount != 0; }
//! Get count of function return values.
ASMJIT_INLINE uint32_t getRetCount() const noexcept { return _retCount; }
//! Get function return value.
ASMJIT_INLINE FuncInOut& getRet(uint32_t index = kFuncRetLo) noexcept { return _rets[index]; }
//! Get function return value.
ASMJIT_INLINE const FuncInOut& getRet(uint32_t index = kFuncRetLo) const noexcept { return _rets[index]; }
//! Get the number of function arguments.
ASMJIT_INLINE uint32_t getNumArgs() const noexcept { return _numArgs; }
//! Get function arguments array.
ASMJIT_INLINE FuncInOut* getArgs() noexcept { return _args; }
//! Get function arguments array (const).
ASMJIT_INLINE const FuncInOut* getArgs() const noexcept { return _args; }
//! Get function argument at index `index`.
ASMJIT_INLINE FuncInOut& getArg(size_t index) noexcept {
ASMJIT_ASSERT(index < kFuncArgCountLoHi);
return _args[index];
}
//! Get function argument at index `index`.
ASMJIT_INLINE const FuncInOut& getArg(size_t index) const noexcept {
ASMJIT_ASSERT(index < kFuncArgCountLoHi);
return _args[index];
}
ASMJIT_INLINE void resetArg(size_t index) noexcept {
ASMJIT_ASSERT(index < kFuncArgCountLoHi);
_args[index].reset();
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Calling convention.
uint8_t _callConv;
//! Whether a callee pops stack.
uint8_t _calleePopsStack : 1;
//! Direction for arguments passed on the stack, see `FuncDir`.
uint8_t _argsDirection : 1;
//! Reserved #0 (alignment).
uint8_t _reserved0 : 6;
//! Number of function arguments.
uint8_t _numArgs;
//! Number of function return values.
uint8_t _retCount;
//! Count of bytes consumed by arguments on the stack (aligned).
uint32_t _argStackSize;
//! Size of "Red Zone".
//!
//! NOTE: Used by AMD64-ABI (128 bytes).
uint16_t _redZoneSize;
//! Size of "Spill Zone".
//!
//! NOTE: Used by WIN64-ABI (32 bytes).
uint16_t _spillZoneSize;
//! Function arguments (LO & HI) mapped to physical registers and stack.
FuncInOut _args[kFuncArgCountLoHi];
//! Function return value(s).
FuncInOut _rets[2];
};
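// Usage sketch (illustration only): walking a declaration that has already
// been populated by architecture-specific code; only the accessors declared
// above are used.
static ASMJIT_INLINE uint32_t FuncDecl_countRegArgsSketch(const FuncDecl& decl) noexcept {
  uint32_t count = 0;
  for (uint32_t i = 0; i < decl.getNumArgs(); i++) {
    const FuncInOut& arg = decl.getArg(i);
    // An argument can be assigned to a register, to the stack, or left unset.
    if (arg.isSet() && arg.hasRegIndex())
      count++;
  }
  return count;
}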
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER
#endif // _ASMJIT_BASE_COMPILERFUNC_H


@ -0,0 +1,523 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/constpool.h"
#include "../base/utils.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// Binary tree code is based on Julienne Walker's "Andersson Binary Trees"
// article and implementation. However, only three operations are implemented -
// get, insert and traverse.
// ============================================================================
// [asmjit::ConstPool::Tree - Ops]
// ============================================================================
//! \internal
//!
//! Remove left horizontal links.
static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_skewNode(ConstPool::Node* node) noexcept {
ConstPool::Node* link = node->_link[0];
uint32_t level = node->_level;
if (level != 0 && link != nullptr && link->_level == level) {
node->_link[0] = link->_link[1];
link->_link[1] = node;
node = link;
}
return node;
}
//! \internal
//!
//! Remove consecutive horizontal links.
static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_splitNode(ConstPool::Node* node) noexcept {
ConstPool::Node* link = node->_link[1];
uint32_t level = node->_level;
if (level != 0 && link != nullptr && link->_link[1] != nullptr && link->_link[1]->_level == level) {
node->_link[1] = link->_link[0];
link->_link[0] = node;
node = link;
node->_level++;
}
return node;
}
ConstPool::Node* ConstPool::Tree::get(const void* data) noexcept {
ConstPool::Node* node = _root;
size_t dataSize = _dataSize;
while (node != nullptr) {
int c = ::memcmp(node->getData(), data, dataSize);
if (c == 0)
return node;
node = node->_link[c < 0];
}
return nullptr;
}
void ConstPool::Tree::put(ConstPool::Node* newNode) noexcept {
size_t dataSize = _dataSize;
_length++;
if (_root == nullptr) {
_root = newNode;
return;
}
ConstPool::Node* node = _root;
ConstPool::Node* stack[kHeightLimit];
unsigned int top = 0;
unsigned int dir;
// Find a spot and save the stack.
for (;;) {
stack[top++] = node;
dir = ::memcmp(node->getData(), newNode->getData(), dataSize) < 0;
ConstPool::Node* link = node->_link[dir];
if (link == nullptr)
break;
node = link;
}
// Link and rebalance.
node->_link[dir] = newNode;
while (top > 0) {
// Which child?
node = stack[--top];
if (top != 0) {
dir = stack[top - 1]->_link[1] == node;
}
node = ConstPoolTree_skewNode(node);
node = ConstPoolTree_splitNode(node);
// Fix the parent.
if (top != 0)
stack[top - 1]->_link[dir] = node;
else
_root = node;
}
}
// ============================================================================
// [asmjit::ConstPool - Construction / Destruction]
// ============================================================================
ConstPool::ConstPool(Zone* zone) noexcept {
_zone = zone;
size_t dataSize = 1;
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
_tree[i].setDataSize(dataSize);
_gaps[i] = nullptr;
dataSize <<= 1;
}
_gapPool = nullptr;
_size = 0;
_alignment = 0;
}
ConstPool::~ConstPool() noexcept {}
// ============================================================================
// [asmjit::ConstPool - Reset]
// ============================================================================
void ConstPool::reset() noexcept {
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
_tree[i].reset();
_gaps[i] = nullptr;
}
_gapPool = nullptr;
_size = 0;
_alignment = 0;
}
// ============================================================================
// [asmjit::ConstPool - Ops]
// ============================================================================
static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept {
ConstPool::Gap* gap = self->_gapPool;
if (gap == nullptr)
return self->_zone->allocT<ConstPool::Gap>();
self->_gapPool = gap->_next;
return gap;
}
static ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) noexcept {
gap->_next = self->_gapPool;
self->_gapPool = gap;
}
static void ConstPool_addGap(ConstPool* self, size_t offset, size_t length) noexcept {
ASMJIT_ASSERT(length > 0);
while (length > 0) {
size_t gapIndex;
size_t gapLength;
if (length >= 16 && Utils::isAligned<size_t>(offset, 16)) {
gapIndex = ConstPool::kIndex16;
gapLength = 16;
}
else if (length >= 8 && Utils::isAligned<size_t>(offset, 8)) {
gapIndex = ConstPool::kIndex8;
gapLength = 8;
}
else if (length >= 4 && Utils::isAligned<size_t>(offset, 4)) {
gapIndex = ConstPool::kIndex4;
gapLength = 4;
}
else if (length >= 2 && Utils::isAligned<size_t>(offset, 2)) {
gapIndex = ConstPool::kIndex2;
gapLength = 2;
}
else {
gapIndex = ConstPool::kIndex1;
gapLength = 1;
}
    // We don't have to check for errors here; if this allocation fails nothing
    // really happened (the gap just won't be tracked) and a later, checked
    // allocation will fail with kErrorNoHeapMemory.
ConstPool::Gap* gap = ConstPool_allocGap(self);
if (gap == nullptr)
return;
gap->_next = self->_gaps[gapIndex];
self->_gaps[gapIndex] = gap;
gap->_offset = offset;
gap->_length = gapLength;
offset += gapLength;
length -= gapLength;
}
}
Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept {
size_t treeIndex;
if (size == 32)
treeIndex = kIndex32;
else if (size == 16)
treeIndex = kIndex16;
else if (size == 8)
treeIndex = kIndex8;
else if (size == 4)
treeIndex = kIndex4;
else if (size == 2)
treeIndex = kIndex2;
else if (size == 1)
treeIndex = kIndex1;
else
return kErrorInvalidArgument;
ConstPool::Node* node = _tree[treeIndex].get(data);
if (node != nullptr) {
dstOffset = node->_offset;
return kErrorOk;
}
  // Before extending the pool, check whether there is a gap that can be
  // reused for the requested data.
size_t offset = ~static_cast<size_t>(0);
size_t gapIndex = treeIndex;
while (gapIndex != kIndexCount - 1) {
ConstPool::Gap* gap = _gaps[treeIndex];
// Check if there is a gap.
if (gap != nullptr) {
size_t gapOffset = gap->_offset;
size_t gapLength = gap->_length;
// Destroy the gap for now.
_gaps[treeIndex] = gap->_next;
ConstPool_freeGap(this, gap);
offset = gapOffset;
ASMJIT_ASSERT(Utils::isAligned<size_t>(offset, size));
gapLength -= size;
if (gapLength > 0)
ConstPool_addGap(this, gapOffset, gapLength);
}
gapIndex++;
}
if (offset == ~static_cast<size_t>(0)) {
    // Get how many bytes have to be skipped so the address is aligned to 'size'.
size_t diff = Utils::alignDiff<size_t>(_size, size);
if (diff != 0) {
ConstPool_addGap(this, _size, diff);
_size += diff;
}
offset = _size;
_size += size;
}
// Add the initial node to the right index.
node = ConstPool::Tree::_newNode(_zone, data, size, offset, false);
if (node == nullptr)
return kErrorNoHeapMemory;
_tree[treeIndex].put(node);
_alignment = Utils::iMax<size_t>(_alignment, size);
dstOffset = offset;
  // Now create a bunch of shared constants that are based on the data pattern.
  // We stop at size 4; it probably doesn't make sense to split constants down
  // to 1 byte.
size_t pCount = 1;
while (size > 4) {
size >>= 1;
pCount <<= 1;
ASMJIT_ASSERT(treeIndex != 0);
treeIndex--;
const uint8_t* pData = static_cast<const uint8_t*>(data);
for (size_t i = 0; i < pCount; i++, pData += size) {
node = _tree[treeIndex].get(pData);
if (node != nullptr)
continue;
node = ConstPool::Tree::_newNode(_zone, pData, size, offset + (i * size), true);
_tree[treeIndex].put(node);
}
}
return kErrorOk;
}
// ============================================================================
// [asmjit::ConstPool - Fill]
// ============================================================================
struct ConstPoolFill {
ASMJIT_INLINE ConstPoolFill(uint8_t* dst, size_t dataSize) noexcept :
_dst(dst),
_dataSize(dataSize) {}
ASMJIT_INLINE void visit(const ConstPool::Node* node) noexcept {
if (!node->_shared)
::memcpy(_dst + node->_offset, node->getData(), _dataSize);
}
uint8_t* _dst;
size_t _dataSize;
};
void ConstPool::fill(void* dst) const noexcept {
  // Clear possible gaps; asmjit should never emit garbage to the output.
::memset(dst, 0, _size);
ConstPoolFill filler(static_cast<uint8_t*>(dst), 1);
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
_tree[i].iterate(filler);
filler._dataSize <<= 1;
}
}
// ============================================================================
// [asmjit::ConstPool - Test]
// ============================================================================
#if defined(ASMJIT_TEST)
UNIT(base_constpool) {
Zone zone(32384 - Zone::kZoneOverhead);
ConstPool pool(&zone);
uint32_t i;
uint32_t kCount = 1000000;
INFO("Adding %u constants to the pool.", kCount);
{
size_t prevOffset;
size_t curOffset;
uint64_t c = ASMJIT_UINT64_C(0x0101010101010101);
EXPECT(pool.add(&c, 8, prevOffset) == kErrorOk,
"pool.add() - Returned error.");
EXPECT(prevOffset == 0,
"pool.add() - First constant should have zero offset.");
for (i = 1; i < kCount; i++) {
c++;
EXPECT(pool.add(&c, 8, curOffset) == kErrorOk,
"pool.add() - Returned error.");
EXPECT(prevOffset + 8 == curOffset,
"pool.add() - Returned incorrect curOffset.");
EXPECT(pool.getSize() == (i + 1) * 8,
"pool.getSize() - Reported incorrect size.");
prevOffset = curOffset;
}
EXPECT(pool.getAlignment() == 8,
"pool.getAlignment() - Expected 8-byte alignment.");
}
INFO("Retrieving %u constants from the pool.", kCount);
{
uint64_t c = ASMJIT_UINT64_C(0x0101010101010101);
for (i = 0; i < kCount; i++) {
size_t offset;
EXPECT(pool.add(&c, 8, offset) == kErrorOk,
"pool.add() - Returned error.");
EXPECT(offset == i * 8,
"pool.add() - Should have reused constant.");
c++;
}
}
INFO("Checking if the constants were split into 4-byte patterns.");
{
uint32_t c = 0x01010101;
for (i = 0; i < kCount; i++) {
size_t offset;
EXPECT(pool.add(&c, 4, offset) == kErrorOk,
"pool.add() - Returned error.");
EXPECT(offset == i * 8,
"pool.add() - Should reuse existing constant.");
c++;
}
}
INFO("Adding 2 byte constant to misalign the current offset.");
{
uint16_t c = 0xFFFF;
size_t offset;
EXPECT(pool.add(&c, 2, offset) == kErrorOk,
"pool.add() - Returned error.");
EXPECT(offset == kCount * 8,
"pool.add() - Didn't return expected position.");
EXPECT(pool.getAlignment() == 8,
"pool.getAlignment() - Expected 8-byte alignment.");
}
INFO("Adding 8 byte constant to check if pool gets aligned again.");
{
uint64_t c = ASMJIT_UINT64_C(0xFFFFFFFFFFFFFFFF);
size_t offset;
EXPECT(pool.add(&c, 8, offset) == kErrorOk,
"pool.add() - Returned error.");
EXPECT(offset == kCount * 8 + 8,
"pool.add() - Didn't return aligned offset.");
}
INFO("Adding 2 byte constant to verify the gap is filled.");
{
uint16_t c = 0xFFFE;
size_t offset;
EXPECT(pool.add(&c, 2, offset) == kErrorOk,
"pool.add() - Returned error.");
EXPECT(offset == kCount * 8 + 2,
"pool.add() - Didn't fill the gap.");
EXPECT(pool.getAlignment() == 8,
"pool.getAlignment() - Expected 8-byte alignment.");
}
INFO("Checking reset functionality.");
{
pool.reset();
EXPECT(pool.getSize() == 0,
"pool.getSize() - Expected pool size to be zero.");
EXPECT(pool.getAlignment() == 0,
"pool.getSize() - Expected pool alignment to be zero.");
}
INFO("Checking pool alignment when combined constants are added.");
{
uint8_t bytes[32] = { 0 };
size_t offset;
pool.add(bytes, 1, offset);
EXPECT(pool.getSize() == 1,
"pool.getSize() - Expected pool size to be 1 byte.");
EXPECT(pool.getAlignment() == 1,
"pool.getSize() - Expected pool alignment to be 1 byte.");
EXPECT(offset == 0,
"pool.getSize() - Expected offset returned to be zero.");
pool.add(bytes, 2, offset);
EXPECT(pool.getSize() == 4,
"pool.getSize() - Expected pool size to be 4 bytes.");
EXPECT(pool.getAlignment() == 2,
"pool.getSize() - Expected pool alignment to be 2 bytes.");
EXPECT(offset == 2,
"pool.getSize() - Expected offset returned to be 2.");
pool.add(bytes, 4, offset);
EXPECT(pool.getSize() == 8,
"pool.getSize() - Expected pool size to be 8 bytes.");
EXPECT(pool.getAlignment() == 4,
"pool.getSize() - Expected pool alignment to be 4 bytes.");
EXPECT(offset == 4,
"pool.getSize() - Expected offset returned to be 4.");
pool.add(bytes, 4, offset);
EXPECT(pool.getSize() == 8,
"pool.getSize() - Expected pool size to be 8 bytes.");
EXPECT(pool.getAlignment() == 4,
"pool.getSize() - Expected pool alignment to be 4 bytes.");
EXPECT(offset == 4,
"pool.getSize() - Expected offset returned to be 8.");
pool.add(bytes, 32, offset);
EXPECT(pool.getSize() == 64,
"pool.getSize() - Expected pool size to be 64 bytes.");
EXPECT(pool.getAlignment() == 32,
"pool.getSize() - Expected pool alignment to be 32 bytes.");
EXPECT(offset == 32,
"pool.getSize() - Expected offset returned to be 32.");
}
}
#endif // ASMJIT_TEST
} // asmjit namespace
// [Api-End]
#include "../apiend.h"


@ -0,0 +1,283 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_CONSTPOOL_H
#define _ASMJIT_BASE_CONSTPOOL_H
// [Dependencies]
#include "../base/zone.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::ConstPool]
// ============================================================================
//! Constant pool.
class ConstPool {
public:
ASMJIT_NO_COPY(ConstPool)
enum {
kIndex1 = 0,
kIndex2 = 1,
kIndex4 = 2,
kIndex8 = 3,
kIndex16 = 4,
kIndex32 = 5,
kIndexCount = 6
};
// --------------------------------------------------------------------------
// [Gap]
// --------------------------------------------------------------------------
//! \internal
//!
//! Zone-allocated const-pool gap.
struct Gap {
//! Link to the next gap
Gap* _next;
//! Offset of the gap.
size_t _offset;
//! Remaining bytes of the gap (basically a gap size).
size_t _length;
};
// --------------------------------------------------------------------------
// [Node]
// --------------------------------------------------------------------------
//! \internal
//!
//! Zone-allocated const-pool node.
struct Node {
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
ASMJIT_INLINE void* getData() const noexcept {
return static_cast<void*>(const_cast<ConstPool::Node*>(this) + 1);
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Left/Right nodes.
Node* _link[2];
//! Horizontal level for balance.
uint32_t _level : 31;
//! Whether this constant is shared with another.
uint32_t _shared : 1;
//! Data offset from the beginning of the pool.
uint32_t _offset;
};
// --------------------------------------------------------------------------
// [Tree]
// --------------------------------------------------------------------------
//! \internal
//!
//! Zone-allocated const-pool tree.
struct Tree {
enum {
//! Maximum tree height == log2(1 << 64).
kHeightLimit = 64
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_INLINE Tree(size_t dataSize = 0) noexcept
: _root(nullptr),
_length(0),
_dataSize(dataSize) {}
ASMJIT_INLINE ~Tree() {}
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
ASMJIT_INLINE void reset() noexcept {
_root = nullptr;
_length = 0;
}
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
ASMJIT_INLINE bool isEmpty() const noexcept { return _length == 0; }
ASMJIT_INLINE size_t getLength() const noexcept { return _length; }
ASMJIT_INLINE void setDataSize(size_t dataSize) noexcept {
ASMJIT_ASSERT(isEmpty());
_dataSize = dataSize;
}
// --------------------------------------------------------------------------
// [Ops]
// --------------------------------------------------------------------------
ASMJIT_API Node* get(const void* data) noexcept;
ASMJIT_API void put(Node* node) noexcept;
// --------------------------------------------------------------------------
// [Iterate]
// --------------------------------------------------------------------------
template<typename Visitor>
ASMJIT_INLINE void iterate(Visitor& visitor) const noexcept {
Node* node = const_cast<Node*>(_root);
if (node == nullptr)
return;
Node* stack[kHeightLimit];
size_t top = 0;
for (;;) {
Node* left = node->_link[0];
if (left != nullptr) {
ASMJIT_ASSERT(top != kHeightLimit);
stack[top++] = node;
node = left;
continue;
}
L_Visit:
visitor.visit(node);
node = node->_link[1];
if (node != nullptr)
continue;
if (top == 0)
return;
node = stack[--top];
goto L_Visit;
}
}
// --------------------------------------------------------------------------
// [Helpers]
// --------------------------------------------------------------------------
static ASMJIT_INLINE Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept {
Node* node = zone->allocT<Node>(sizeof(Node) + size);
if (node == nullptr)
return nullptr;
node->_link[0] = nullptr;
node->_link[1] = nullptr;
node->_level = 1;
node->_shared = shared;
node->_offset = static_cast<uint32_t>(offset);
::memcpy(node->getData(), data, size);
return node;
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Root of the tree
Node* _root;
//! Length of the tree (count of nodes).
size_t _length;
//! Size of the data.
size_t _dataSize;
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_API ConstPool(Zone* zone) noexcept;
ASMJIT_API ~ConstPool() noexcept;
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
ASMJIT_API void reset() noexcept;
// --------------------------------------------------------------------------
// [Ops]
// --------------------------------------------------------------------------
//! Get whether the constant-pool is empty.
ASMJIT_INLINE bool isEmpty() const noexcept { return _size == 0; }
//! Get the size of the constant-pool in bytes.
ASMJIT_INLINE size_t getSize() const noexcept { return _size; }
//! Get minimum alignment.
ASMJIT_INLINE size_t getAlignment() const noexcept { return _alignment; }
//! Add a constant to the constant pool.
//!
//! The constant must have known size, which is 1, 2, 4, 8, 16 or 32 bytes.
  //! The constant is added to the pool only if it doesn't already exist;
  //! otherwise the cached value is returned.
//!
//! AsmJit is able to subdivide added constants, so for example if you add
//! 8-byte constant 0x1122334455667788 it will create the following slots:
//!
//! 8-byte: 0x1122334455667788
//! 4-byte: 0x11223344, 0x55667788
//!
//! The reason is that when combining MMX/SSE/AVX code some patterns are used
//! frequently. However, AsmJit is not able to reallocate a constant that has
  //! already been added. For example, if you add a 4-byte constant and then an
  //! 8-byte constant with the same 4-byte pattern as the previous one, two
//! independent slots will be generated by the pool.
ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset) noexcept;
// --------------------------------------------------------------------------
// [Fill]
// --------------------------------------------------------------------------
//! Fill the destination with the constants from the pool.
ASMJIT_API void fill(void* dst) const noexcept;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Zone allocator.
Zone* _zone;
//! Tree per size.
Tree _tree[kIndexCount];
//! Gaps per size.
Gap* _gaps[kIndexCount];
//! Gaps pool
Gap* _gapPool;
//! Size of the pool (in bytes).
size_t _size;
  //! Alignment.
size_t _alignment;
};
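// Usage sketch (illustration only), mirroring the unit test in constpool.cpp:
// adding the same 8-byte constant twice yields one shared slot. The `Zone` is
// assumed to be owned and reset by the caller.
static ASMJIT_INLINE bool ConstPool_usageSketch(Zone* zone) noexcept {
  ConstPool pool(zone);
  uint64_t c = 42;
  size_t first = 0;
  size_t again = 0;
  if (pool.add(&c, sizeof(c), first) != kErrorOk)
    return false;
  if (pool.add(&c, sizeof(c), again) != kErrorOk)
    return false;
  // The second add() reuses the slot created by the first one.
  return first == again && pool.getSize() == 8 && pool.getAlignment() == 8;
}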
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_CONSTPOOL_H


@ -0,0 +1,374 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/containers.h"
#include "../base/utils.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::StringBuilder - Construction / Destruction]
// ============================================================================
// Should be placed in read-only memory.
static const char StringBuilder_empty[4] = { 0 };
StringBuilder::StringBuilder() noexcept
: _data(const_cast<char*>(StringBuilder_empty)),
_length(0),
_capacity(0),
_canFree(false) {}
StringBuilder::~StringBuilder() noexcept {
if (_canFree)
ASMJIT_FREE(_data);
}
// ============================================================================
// [asmjit::StringBuilder - Prepare / Reserve]
// ============================================================================
char* StringBuilder::prepare(uint32_t op, size_t len) noexcept {
// --------------------------------------------------------------------------
// [Set]
// --------------------------------------------------------------------------
if (op == kStringOpSet) {
    // A zero length requires no work, but we can't return a null pointer since
    // that would indicate a memory allocation failure.
if (len == 0) {
if (_data != StringBuilder_empty)
_data[0] = 0;
_length = 0;
return _data;
}
if (_capacity < len) {
if (len >= IntTraits<size_t>::maxValue() - sizeof(intptr_t) * 2)
return nullptr;
size_t to = Utils::alignTo<size_t>(len, sizeof(intptr_t));
if (to < 256 - sizeof(intptr_t))
to = 256 - sizeof(intptr_t);
char* newData = static_cast<char*>(ASMJIT_ALLOC(to + sizeof(intptr_t)));
if (newData == nullptr) {
clear();
return nullptr;
}
if (_canFree)
ASMJIT_FREE(_data);
_data = newData;
_capacity = to + sizeof(intptr_t) - 1;
_canFree = true;
}
_data[len] = 0;
_length = len;
ASMJIT_ASSERT(_length <= _capacity);
return _data;
}
// --------------------------------------------------------------------------
// [Append]
// --------------------------------------------------------------------------
else {
    // Nothing to append, but we can't return a null pointer since that would
    // indicate a memory allocation failure.
if (len == 0)
return _data + _length;
// Overflow.
if (IntTraits<size_t>::maxValue() - sizeof(intptr_t) * 2 - _length < len)
return nullptr;
size_t after = _length + len;
if (_capacity < after) {
size_t to = _capacity;
if (to < 256)
to = 256;
while (to < 1024 * 1024 && to < after)
to *= 2;
if (to < after) {
to = after;
if (to < (IntTraits<size_t>::maxValue() - 1024 * 32))
to = Utils::alignTo<size_t>(to, 1024 * 32);
}
to = Utils::alignTo<size_t>(to, sizeof(intptr_t));
char* newData = static_cast<char*>(ASMJIT_ALLOC(to + sizeof(intptr_t)));
if (newData == nullptr)
return nullptr;
::memcpy(newData, _data, _length);
if (_canFree)
ASMJIT_FREE(_data);
_data = newData;
_capacity = to + sizeof(intptr_t) - 1;
_canFree = true;
}
char* ret = _data + _length;
_data[after] = 0;
_length = after;
ASMJIT_ASSERT(_length <= _capacity);
return ret;
}
}
bool StringBuilder::reserve(size_t to) noexcept {
if (_capacity >= to)
return true;
if (to >= IntTraits<size_t>::maxValue() - sizeof(intptr_t) * 2)
return false;
to = Utils::alignTo<size_t>(to, sizeof(intptr_t));
char* newData = static_cast<char*>(ASMJIT_ALLOC(to + sizeof(intptr_t)));
if (newData == nullptr)
return false;
::memcpy(newData, _data, _length + 1);
if (_canFree)
ASMJIT_FREE(_data);
_data = newData;
_capacity = to + sizeof(intptr_t) - 1;
_canFree = true;
return true;
}
// ============================================================================
// [asmjit::StringBuilder - Clear]
// ============================================================================
void StringBuilder::clear() noexcept {
if (_data != StringBuilder_empty)
_data[0] = 0;
_length = 0;
}
// ============================================================================
// [asmjit::StringBuilder - Methods]
// ============================================================================
bool StringBuilder::_opString(uint32_t op, const char* str, size_t len) noexcept {
if (len == kInvalidIndex)
len = str != nullptr ? ::strlen(str) : static_cast<size_t>(0);
char* p = prepare(op, len);
if (p == nullptr)
return false;
::memcpy(p, str, len);
return true;
}
bool StringBuilder::_opChar(uint32_t op, char c) noexcept {
char* p = prepare(op, 1);
if (p == nullptr)
return false;
*p = c;
return true;
}
bool StringBuilder::_opChars(uint32_t op, char c, size_t len) noexcept {
char* p = prepare(op, len);
if (p == nullptr)
return false;
::memset(p, c, len);
return true;
}
static const char StringBuilder_numbers[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
bool StringBuilder::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, uint32_t flags) noexcept {
if (base < 2 || base > 36)
base = 10;
char buf[128];
char* p = buf + ASMJIT_ARRAY_SIZE(buf);
uint64_t orig = i;
char sign = '\0';
// --------------------------------------------------------------------------
// [Sign]
// --------------------------------------------------------------------------
if ((flags & kStringFormatSigned) != 0 && static_cast<int64_t>(i) < 0) {
i = static_cast<uint64_t>(-static_cast<int64_t>(i));
sign = '-';
}
else if ((flags & kStringFormatShowSign) != 0) {
sign = '+';
}
else if ((flags & kStringFormatShowSpace) != 0) {
sign = ' ';
}
// --------------------------------------------------------------------------
// [Number]
// --------------------------------------------------------------------------
do {
uint64_t d = i / base;
uint64_t r = i % base;
*--p = StringBuilder_numbers[r];
i = d;
} while (i);
size_t numberLength = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p);
// --------------------------------------------------------------------------
// [Alternate Form]
// --------------------------------------------------------------------------
if ((flags & kStringFormatAlternate) != 0) {
if (base == 8) {
if (orig != 0)
*--p = '0';
}
if (base == 16) {
*--p = 'x';
*--p = '0';
}
}
// --------------------------------------------------------------------------
// [Width]
// --------------------------------------------------------------------------
if (sign != 0)
*--p = sign;
if (width > 256)
width = 256;
if (width <= numberLength)
width = 0;
else
width -= numberLength;
// --------------------------------------------------------------------------
  // [Write]
// --------------------------------------------------------------------------
size_t prefixLength = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p) - numberLength;
char* data = prepare(op, prefixLength + width + numberLength);
if (data == nullptr)
return false;
::memcpy(data, p, prefixLength);
data += prefixLength;
::memset(data, '0', width);
data += width;
::memcpy(data, p + prefixLength, numberLength);
return true;
}
bool StringBuilder::_opHex(uint32_t op, const void* data, size_t len) noexcept {
if (len >= IntTraits<size_t>::maxValue() / 2)
return false;
char* dst = prepare(op, len * 2);
if (dst == nullptr)
return false;
const char* src = static_cast<const char*>(data);
for (size_t i = 0; i < len; i++, dst += 2, src += 1)
{
dst[0] = StringBuilder_numbers[(src[0] >> 4) & 0xF];
dst[1] = StringBuilder_numbers[(src[0] ) & 0xF];
}
return true;
}
bool StringBuilder::_opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept {
char buf[1024];
vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
buf[ASMJIT_ARRAY_SIZE(buf) - 1] = '\0';
return _opString(op, buf);
}
bool StringBuilder::setFormat(const char* fmt, ...) noexcept {
bool result;
va_list ap;
va_start(ap, fmt);
result = _opVFormat(kStringOpSet, fmt, ap);
va_end(ap);
return result;
}
bool StringBuilder::appendFormat(const char* fmt, ...) noexcept {
bool result;
va_list ap;
va_start(ap, fmt);
result = _opVFormat(kStringOpAppend, fmt, ap);
va_end(ap);
return result;
}
bool StringBuilder::eq(const char* str, size_t len) const noexcept {
const char* aData = _data;
const char* bData = str;
size_t aLength = _length;
size_t bLength = len;
if (bLength == kInvalidIndex) {
size_t i;
for (i = 0; i < aLength; i++) {
if (aData[i] != bData[i] || bData[i] == 0)
return false;
}
return bData[i] == 0;
}
else {
if (aLength != bLength)
return false;
return ::memcmp(aData, bData, aLength) == 0;
}
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"


@ -0,0 +1,550 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_CONTAINERS_H
#define _ASMJIT_BASE_CONTAINERS_H
// [Dependencies]
#include "../base/globals.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::BitArray]
// ============================================================================
//! Fixed size bit-array.
//!
//! Used by variable liveness analysis.
struct BitArray {
// --------------------------------------------------------------------------
// [Enums]
// --------------------------------------------------------------------------
enum {
kEntitySize = static_cast<int>(sizeof(uintptr_t)),
kEntityBits = kEntitySize * 8
};
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
ASMJIT_INLINE uintptr_t getBit(uint32_t index) const noexcept {
return (data[index / kEntityBits] >> (index % kEntityBits)) & 1;
}
ASMJIT_INLINE void setBit(uint32_t index) noexcept {
data[index / kEntityBits] |= static_cast<uintptr_t>(1) << (index % kEntityBits);
}
ASMJIT_INLINE void delBit(uint32_t index) noexcept {
data[index / kEntityBits] &= ~(static_cast<uintptr_t>(1) << (index % kEntityBits));
}
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
//! Copy bits from `s0`, returns `true` if at least one bit is set in `s0`.
ASMJIT_INLINE bool copyBits(const BitArray* s0, uint32_t len) noexcept {
uintptr_t r = 0;
for (uint32_t i = 0; i < len; i++) {
uintptr_t t = s0->data[i];
data[i] = t;
r |= t;
}
return r != 0;
}
ASMJIT_INLINE bool addBits(const BitArray* s0, uint32_t len) noexcept {
return addBits(this, s0, len);
}
ASMJIT_INLINE bool addBits(const BitArray* s0, const BitArray* s1, uint32_t len) noexcept {
uintptr_t r = 0;
for (uint32_t i = 0; i < len; i++) {
uintptr_t t = s0->data[i] | s1->data[i];
data[i] = t;
r |= t;
}
return r != 0;
}
ASMJIT_INLINE bool andBits(const BitArray* s1, uint32_t len) noexcept {
return andBits(this, s1, len);
}
ASMJIT_INLINE bool andBits(const BitArray* s0, const BitArray* s1, uint32_t len) noexcept {
uintptr_t r = 0;
for (uint32_t i = 0; i < len; i++) {
uintptr_t t = s0->data[i] & s1->data[i];
data[i] = t;
r |= t;
}
return r != 0;
}
ASMJIT_INLINE bool delBits(const BitArray* s1, uint32_t len) noexcept {
return delBits(this, s1, len);
}
ASMJIT_INLINE bool delBits(const BitArray* s0, const BitArray* s1, uint32_t len) noexcept {
uintptr_t r = 0;
for (uint32_t i = 0; i < len; i++) {
uintptr_t t = s0->data[i] & ~s1->data[i];
data[i] = t;
r |= t;
}
return r != 0;
}
ASMJIT_INLINE bool _addBitsDelSource(BitArray* s1, uint32_t len) noexcept {
return _addBitsDelSource(this, s1, len);
}
ASMJIT_INLINE bool _addBitsDelSource(const BitArray* s0, BitArray* s1, uint32_t len) noexcept {
uintptr_t r = 0;
for (uint32_t i = 0; i < len; i++) {
uintptr_t a = s0->data[i];
uintptr_t b = s1->data[i];
this->data[i] = a | b;
b &= ~a;
s1->data[i] = b;
r |= b;
}
return r != 0;
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
uintptr_t data[1];
};
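// Usage sketch (illustration only): a bare `BitArray` provides `kEntityBits`
// bits; the register allocator normally allocates extra words right behind the
// structure when more bits are needed.
static ASMJIT_INLINE bool BitArray_usageSketch() noexcept {
  BitArray bits;
  bits.data[0] = 0;  // Only the first word is touched by this sketch.
  bits.setBit(3);
  bits.setBit(5);
  bits.delBit(3);
  return bits.getBit(5) != 0 && bits.getBit(3) == 0;
}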
// ============================================================================
// [asmjit::PodList<T>]
// ============================================================================
//! \internal
template <typename T>
class PodList {
public:
ASMJIT_NO_COPY(PodList<T>)
// --------------------------------------------------------------------------
// [Link]
// --------------------------------------------------------------------------
struct Link {
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get next node.
ASMJIT_INLINE Link* getNext() const noexcept { return _next; }
//! Get value.
ASMJIT_INLINE T getValue() const noexcept { return _value; }
//! Set value to `value`.
ASMJIT_INLINE void setValue(const T& value) noexcept { _value = value; }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
Link* _next;
T _value;
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_INLINE PodList() noexcept : _first(nullptr), _last(nullptr) {}
ASMJIT_INLINE ~PodList() noexcept {}
// --------------------------------------------------------------------------
// [Data]
// --------------------------------------------------------------------------
  ASMJIT_INLINE bool isEmpty() const noexcept { return _first == nullptr; }
ASMJIT_INLINE Link* getFirst() const noexcept { return _first; }
ASMJIT_INLINE Link* getLast() const noexcept { return _last; }
// --------------------------------------------------------------------------
// [Ops]
// --------------------------------------------------------------------------
ASMJIT_INLINE void reset() noexcept {
_first = nullptr;
_last = nullptr;
}
ASMJIT_INLINE void prepend(Link* link) noexcept {
link->_next = _first;
if (_first == nullptr)
_last = link;
_first = link;
}
ASMJIT_INLINE void append(Link* link) noexcept {
link->_next = nullptr;
if (_first == nullptr)
_first = link;
else
_last->_next = link;
_last = link;
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
Link* _first;
Link* _last;
};
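// Usage sketch (illustration only): the list doesn't own its links, so the
// caller (typically a Zone allocator inside the compiler) has to provide them.
static ASMJIT_INLINE int PodList_usageSketch() noexcept {
  PodList<int> list;
  PodList<int>::Link a;
  PodList<int>::Link b;
  a.setValue(1);
  b.setValue(2);
  list.append(&a);
  list.append(&b);
  int sum = 0;
  for (PodList<int>::Link* link = list.getFirst(); link != nullptr; link = link->getNext())
    sum += link->getValue();
  return sum; // 1 + 2 == 3.
}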
// ============================================================================
// [asmjit::StringBuilder]
// ============================================================================
//! String builder.
//!
//! String builder was designed to build a string using append-like operations
//! that append numbers, other strings, or single characters. It can allocate
//! its own buffer or use a buffer created on the stack.
//!
//! String builder contains methods specific to AsmJit functionality, used for
//! logging or HTML output.
class StringBuilder {
public:
ASMJIT_NO_COPY(StringBuilder)
// --------------------------------------------------------------------------
// [Enums]
// --------------------------------------------------------------------------
//! \internal
//!
//! String operation.
ASMJIT_ENUM(StringOp) {
//! Replace the current string by a given content.
kStringOpSet = 0,
//! Append a given content to the current string.
kStringOpAppend = 1
};
//! \internal
//!
//! String format flags.
ASMJIT_ENUM(StringFormatFlags) {
kStringFormatShowSign = 0x00000001,
kStringFormatShowSpace = 0x00000002,
kStringFormatAlternate = 0x00000004,
kStringFormatSigned = 0x80000000
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_API StringBuilder() noexcept;
ASMJIT_API ~StringBuilder() noexcept;
ASMJIT_INLINE StringBuilder(const _NoInit&) noexcept {}
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get string builder capacity.
ASMJIT_INLINE size_t getCapacity() const noexcept { return _capacity; }
//! Get length.
ASMJIT_INLINE size_t getLength() const noexcept { return _length; }
//! Get null-terminated string data.
ASMJIT_INLINE char* getData() noexcept { return _data; }
//! Get null-terminated string data (const).
ASMJIT_INLINE const char* getData() const noexcept { return _data; }
// --------------------------------------------------------------------------
// [Prepare / Reserve]
// --------------------------------------------------------------------------
//! Prepare to set/append.
ASMJIT_API char* prepare(uint32_t op, size_t len) noexcept;
//! Reserve `to` bytes in string builder.
ASMJIT_API bool reserve(size_t to) noexcept;
// --------------------------------------------------------------------------
// [Clear]
// --------------------------------------------------------------------------
//! Clear the content in String builder.
ASMJIT_API void clear() noexcept;
// --------------------------------------------------------------------------
// [Op]
// --------------------------------------------------------------------------
ASMJIT_API bool _opString(uint32_t op, const char* str, size_t len = kInvalidIndex) noexcept;
ASMJIT_API bool _opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept;
ASMJIT_API bool _opChar(uint32_t op, char c) noexcept;
ASMJIT_API bool _opChars(uint32_t op, char c, size_t len) noexcept;
ASMJIT_API bool _opNumber(uint32_t op, uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept;
ASMJIT_API bool _opHex(uint32_t op, const void* data, size_t len) noexcept;
// --------------------------------------------------------------------------
// [Set]
// --------------------------------------------------------------------------
//! Replace the current content by `str` of `len`.
ASMJIT_INLINE bool setString(const char* str, size_t len = kInvalidIndex) noexcept {
return _opString(kStringOpSet, str, len);
}
//! Replace the current content by formatted string `fmt`.
ASMJIT_INLINE bool setVFormat(const char* fmt, va_list ap) noexcept {
return _opVFormat(kStringOpSet, fmt, ap);
}
//! Replace the current content by formatted string `fmt`.
ASMJIT_API bool setFormat(const char* fmt, ...) noexcept;
//! Replace the current content by `c` character.
ASMJIT_INLINE bool setChar(char c) noexcept {
return _opChar(kStringOpSet, c);
}
//! Replace the current content by `c` of `len`.
ASMJIT_INLINE bool setChars(char c, size_t len) noexcept {
return _opChars(kStringOpSet, c, len);
}
//! Replace the current content by formatted integer `i`.
ASMJIT_INLINE bool setInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kStringOpSet, i, base, width, flags | kStringFormatSigned);
}
//! Replace the current content by formatted integer `i`.
ASMJIT_INLINE bool setUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kStringOpSet, i, base, width, flags);
}
//! Replace the current content by the given `data` converted to a HEX string.
ASMJIT_INLINE bool setHex(const void* data, size_t len) noexcept {
return _opHex(kStringOpSet, data, len);
}
// --------------------------------------------------------------------------
// [Append]
// --------------------------------------------------------------------------
//! Append `str` of `len`.
ASMJIT_INLINE bool appendString(const char* str, size_t len = kInvalidIndex) noexcept {
return _opString(kStringOpAppend, str, len);
}
//! Append a formatted string `fmt` to the current content.
ASMJIT_INLINE bool appendVFormat(const char* fmt, va_list ap) noexcept {
return _opVFormat(kStringOpAppend, fmt, ap);
}
//! Append a formatted string `fmt` to the current content.
ASMJIT_API bool appendFormat(const char* fmt, ...) noexcept;
//! Append `c` character.
ASMJIT_INLINE bool appendChar(char c) noexcept {
return _opChar(kStringOpAppend, c);
}
//! Append `c` of `len`.
ASMJIT_INLINE bool appendChars(char c, size_t len) noexcept {
return _opChars(kStringOpAppend, c, len);
}
//! Append `i`.
ASMJIT_INLINE bool appendInt(int64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kStringOpAppend, static_cast<uint64_t>(i), base, width, flags | kStringFormatSigned);
}
//! Append `i`.
ASMJIT_INLINE bool appendUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kStringOpAppend, i, base, width, flags);
}
//! Append the given `data` converted to a HEX string.
ASMJIT_INLINE bool appendHex(const void* data, size_t len) noexcept {
return _opHex(kStringOpAppend, data, len);
}
// --------------------------------------------------------------------------
// [_Append]
// --------------------------------------------------------------------------
//! Append `str` of `len`, inlined, without buffer overflow check.
ASMJIT_INLINE void _appendString(const char* str, size_t len = kInvalidIndex) noexcept {
// len should be a constant if we are inlining.
if (len == kInvalidIndex) {
char* p = &_data[_length];
while (*str) {
ASMJIT_ASSERT(p < _data + _capacity);
*p++ = *str++;
}
*p = '\0';
_length = (size_t)(p - _data);
}
else {
ASMJIT_ASSERT(_capacity - _length >= len);
char* p = &_data[_length];
char* pEnd = p + len;
while (p < pEnd)
*p++ = *str++;
*p = '\0';
_length += len;
}
}
//! Append `c` character, inlined, without buffer overflow check.
ASMJIT_INLINE void _appendChar(char c) noexcept {
ASMJIT_ASSERT(_capacity - _length >= 1);
_data[_length] = c;
_length++;
_data[_length] = '\0';
}
//! Append `c` of `len`, inlined, without buffer overflow check.
ASMJIT_INLINE void _appendChars(char c, size_t len) noexcept {
ASMJIT_ASSERT(_capacity - _length >= len);
char* p = &_data[_length];
char* pEnd = p + len;
while (p < pEnd)
*p++ = c;
*p = '\0';
_length += len;
}
ASMJIT_INLINE void _appendUInt32(uint32_t i) noexcept {
char buf_[32];
char* pEnd = buf_ + ASMJIT_ARRAY_SIZE(buf_);
char* pBuf = pEnd;
do {
uint32_t d = i / 10;
uint32_t r = i % 10;
*--pBuf = static_cast<uint8_t>(r + '0');
i = d;
} while (i);
ASMJIT_ASSERT(_capacity - _length >= (size_t)(pEnd - pBuf));
char* p = &_data[_length];
do {
*p++ = *pBuf;
} while (++pBuf != pEnd);
*p = '\0';
_length = (size_t)(p - _data);
}
// --------------------------------------------------------------------------
// [Eq]
// --------------------------------------------------------------------------
//! Check for equality with other `str` of `len`.
ASMJIT_API bool eq(const char* str, size_t len = kInvalidIndex) const noexcept;
//! Check for equality with `other`.
ASMJIT_INLINE bool eq(const StringBuilder& other) const noexcept { return eq(other._data); }
// --------------------------------------------------------------------------
// [Operator Overload]
// --------------------------------------------------------------------------
ASMJIT_INLINE bool operator==(const StringBuilder& other) const noexcept { return eq(other); }
ASMJIT_INLINE bool operator!=(const StringBuilder& other) const noexcept { return !eq(other); }
ASMJIT_INLINE bool operator==(const char* str) const noexcept { return eq(str); }
ASMJIT_INLINE bool operator!=(const char* str) const noexcept { return !eq(str); }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! String data.
char* _data;
//! Length.
size_t _length;
//! Capacity.
size_t _capacity;
//! Whether the string can be freed.
size_t _canFree;
};
// ============================================================================
// [asmjit::StringBuilderTmp]
// ============================================================================
//! Temporary string builder with a statically allocated buffer of `N` bytes.
template<size_t N>
class StringBuilderTmp : public StringBuilder {
public:
ASMJIT_NO_COPY(StringBuilderTmp<N>)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_INLINE StringBuilderTmp() noexcept : StringBuilder(NoInit) {
_data = _embeddedData;
_data[0] = 0;
_length = 0;
_capacity = N;
_canFree = false;
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Embedded data.
char _embeddedData[static_cast<size_t>(
N + 1 + sizeof(intptr_t)) & ~static_cast<size_t>(sizeof(intptr_t) - 1)];
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_CONTAINERS_H
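
A short sketch of how client code might drive this class (the logger further below formats its output the same way). appendString(), appendChar(), and getData() are public members declared earlier in this header, and the include path assumes the thirdparty directory added to the include list above, so the exact calls here are illustrative rather than authoritative.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include "AsmJit/base/containers.h"

// Formats a small hex dump; StringBuilderTmp<256> keeps the first 256 bytes
// in its embedded buffer, so short output never touches the heap.
static void dumpBytes(const uint8_t* bytes, size_t len) {
  asmjit::StringBuilderTmp<256> sb;
  sb.appendString("bytes ");          // assumed public wrapper of _appendString()
  sb.appendHex(bytes, len);           // hex-encodes `len` bytes (see appendHex above)
  sb.appendChar('\n');
  std::fputs(sb.getData(), stdout);   // assumed accessor for the internal buffer
}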

View File

@@ -0,0 +1,643 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/cpuinfo.h"
#include "../base/utils.h"
#if ASMJIT_OS_POSIX
# include <errno.h>
# include <sys/statvfs.h>
# include <sys/utsname.h>
# include <unistd.h>
#endif // ASMJIT_OS_POSIX
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
# if ASMJIT_CC_MSC_GE(14, 0, 0)
# include <intrin.h> // Required by `__cpuid()` and `_xgetbv()`.
# endif // _MSC_VER >= 1400
#endif
#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
# if ASMJIT_OS_LINUX
# include <sys/auxv.h> // Required by `getauxval()`.
# endif
#endif
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::CpuInfo - Detect ARM & ARM64]
// ============================================================================
// ARM information has to be retrieved by the OS (this is how ARM was designed).
#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
#if ASMJIT_ARCH_ARM64
static void armPopulateBaseline64Features(CpuInfo* cpuInfo) noexcept {
// Thumb (including all variations) is only supported on ARM32.
// ARM64 is based on ARMv8 and newer.
cpuInfo->addFeature(CpuInfo::kArmFeatureV6);
cpuInfo->addFeature(CpuInfo::kArmFeatureV7);
cpuInfo->addFeature(CpuInfo::kArmFeatureV8);
// ARM64 comes with these features by default.
cpuInfo->addFeature(CpuInfo::kArmFeatureDSP);
cpuInfo->addFeature(CpuInfo::kArmFeatureIDIV);
cpuInfo->addFeature(CpuInfo::kArmFeatureVFP2);
cpuInfo->addFeature(CpuInfo::kArmFeatureVFP3);
cpuInfo->addFeature(CpuInfo::kArmFeatureVFP4);
}
#endif // ASMJIT_ARCH_ARM64
#if ASMJIT_OS_WINDOWS
//! \internal
//!
//! Detect ARM CPU features on Windows.
//!
//! The detection is based on `IsProcessorFeaturePresent()` API call.
static void armDetectCpuInfoOnWindows(CpuInfo* cpuInfo) noexcept {
#if ASMJIT_ARCH_ARM32
cpuInfo->setArch(kArchArm32);
// Windows for ARM requires at least ARMv7 with DSP extensions.
cpuInfo->addFeature(CpuInfo::kArmFeatureV6);
cpuInfo->addFeature(CpuInfo::kArmFeatureV7);
cpuInfo->addFeature(CpuInfo::kArmFeatureDSP);
// Windows for ARM requires VFP3.
cpuInfo->addFeature(CpuInfo::kArmFeatureVFP2);
cpuInfo->addFeature(CpuInfo::kArmFeatureVFP3);
// Windows for ARM requires and uses THUMB2.
cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB);
cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB2);
#else
cpuInfo->setArch(kArchArm64);
armPopulateBaseline64Features(cpuInfo);
#endif
// Windows for ARM requires NEON.
cpuInfo->addFeature(CpuInfo::kArmFeatureNEON);
// Detect additional CPU features by calling `IsProcessorFeaturePresent()`.
struct WinPFPMapping {
uint32_t pfpId, featureId;
};
static const WinPFPMapping mapping[] = {
{ PF_ARM_FMAC_INSTRUCTIONS_AVAILABLE , CpuInfo::kArmFeatureVFP4 },
{ PF_ARM_VFP_32_REGISTERS_AVAILABLE , CpuInfo::kArmFeatureVFP_D32 },
{ PF_ARM_DIVIDE_INSTRUCTION_AVAILABLE, CpuInfo::kArmFeatureIDIV },
{ PF_ARM_64BIT_LOADSTORE_ATOMIC , CpuInfo::kArmFeatureAtomics64 }
};
for (uint32_t i = 0; i < ASMJIT_ARRAY_SIZE(mapping); i++)
if (::IsProcessorFeaturePresent(mapping[i].pfpId))
cpuInfo->addFeature(mapping[i].featureId);
}
#endif // ASMJIT_OS_WINDOWS
#if ASMJIT_OS_LINUX
struct LinuxHWCapMapping {
uint32_t hwcapMask, featureId;
};
static void armDetectHWCaps(CpuInfo* cpuInfo,
unsigned long type, const LinuxHWCapMapping* mapping, size_t length) noexcept {
unsigned long mask = getauxval(type);
for (size_t i = 0; i < length; i++)
if ((mask & mapping[i].hwcapMask) == mapping[i].hwcapMask)
cpuInfo->addFeature(mapping[i].featureId);
}
//! \internal
//!
//! Detect ARM CPU features on Linux.
//!
//! The detection is based on `getauxval()`.
static void armDetectCpuInfoOnLinux(CpuInfo* cpuInfo) noexcept {
#if ASMJIT_ARCH_ARM32
cpuInfo->setArch(kArchArm32);
// `AT_HWCAP` provides ARMv7 (and less) related flags.
static const LinuxHWCapMapping hwCapMapping[] = {
{ /* HWCAP_VFPv3 */ (1 << 13), CpuInfo::kArmFeatureVFP3 },
{ /* HWCAP_VFPv4 */ (1 << 16), CpuInfo::kArmFeatureVFP4 },
{ /* HWCAP_IDIVA */ (3 << 17), CpuInfo::kArmFeatureIDIV },
{ /* HWCAP_VFPD32 */ (1 << 19), CpuInfo::kArmFeatureVFP_D32 },
{ /* HWCAP_NEON */ (1 << 12), CpuInfo::kArmFeatureNEON },
{ /* HWCAP_EDSP */ (1 << 7), CpuInfo::kArmFeatureDSP }
};
armDetectHWCaps(cpuInfo, AT_HWCAP, hwCapMapping, ASMJIT_ARRAY_SIZE(hwCapMapping));
// VFP3 implies VFP2.
if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFP3))
cpuInfo->addFeature(CpuInfo::kArmFeatureVFP2);
// VFP2 implies ARMv6.
if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFP2))
cpuInfo->addFeature(CpuInfo::kArmFeatureV6);
// VFP3 or NEON implies ARMv7.
if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFP3) ||
cpuInfo->hasFeature(CpuInfo::kArmFeatureNEON))
cpuInfo->addFeature(CpuInfo::kArmFeatureV7);
// `AT_HWCAP2` provides ARMv8 related flags.
static const LinuxHWCapMapping hwCap2Mapping[] = {
{ /* HWCAP2_AES */ (1 << 0), CpuInfo::kArmFeatureAES },
{ /* HWCAP2_CRC32 */ (1 << 4), CpuInfo::kArmFeatureCRC32 },
{ /* HWCAP2_PMULL */ (1 << 1), CpuInfo::kArmFeaturePMULL },
{ /* HWCAP2_SHA1 */ (1 << 2), CpuInfo::kArmFeatureSHA1 },
{ /* HWCAP2_SHA2 */ (1 << 3), CpuInfo::kArmFeatureSHA256 }
};
armDetectHWCaps(cpuInfo, AT_HWCAP2, hwCap2Mapping, ASMJIT_ARRAY_SIZE(hwCap2Mapping));
if (cpuInfo->hasFeature(CpuInfo::kArmFeatureAES ) ||
cpuInfo->hasFeature(CpuInfo::kArmFeatureCRC32 ) ||
cpuInfo->hasFeature(CpuInfo::kArmFeaturePMULL ) ||
cpuInfo->hasFeature(CpuInfo::kArmFeatureSHA1 ) ||
cpuInfo->hasFeature(CpuInfo::kArmFeatureSHA256)) {
cpuInfo->addFeature(CpuInfo::kArmFeatureV8);
}
#else
cpuInfo->setArch(kArchArm64);
armPopulateBaseline64Features(cpuInfo);
// `AT_HWCAP` provides ARMv8 related flags.
static const LinuxHWCapMapping hwCapMapping[] = {
{ /* HWCAP_ASIMD */ (1 << 1), CpuInfo::kArmFeatureNEON },
{ /* HWCAP_AES */ (1 << 3), CpuInfo::kArmFeatureAES },
{ /* HWCAP_CRC32 */ (1 << 7), CpuInfo::kArmFeatureCRC32 },
{ /* HWCAP_PMULL */ (1 << 4), CpuInfo::kArmFeaturePMULL },
{ /* HWCAP_SHA1 */ (1 << 5), CpuInfo::kArmFeatureSHA1 },
{ /* HWCAP_SHA2 */ (1 << 6), CpuInfo::kArmFeatureSHA256 },
{ /* HWCAP_ATOMICS */ (1 << 8), CpuInfo::kArmFeatureAtomics64 }
};
armDetectHWCaps(cpuInfo, AT_HWCAP, hwCapMapping, ASMJIT_ARRAY_SIZE(hwCapMapping));
// `AT_HWCAP2` is not used at the moment.
#endif
}
#endif // ASMJIT_OS_LINUX
static void armDetectCpuInfo(CpuInfo* cpuInfo) noexcept {
#if ASMJIT_OS_WINDOWS
armDetectCpuInfoOnWindows(cpuInfo);
#elif ASMJIT_OS_LINUX
armDetectCpuInfoOnLinux(cpuInfo);
#else
# error "[asmjit] armDetectCpuInfo() - Unsupported OS."
#endif
}
#endif // ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
// ============================================================================
// [asmjit::CpuInfo - Detect X86 & X64]
// ============================================================================
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
//! \internal
//!
//! X86 CPUID result.
struct CpuIdResult {
uint32_t eax, ebx, ecx, edx;
};
//! \internal
//!
//! Content of XCR register, result of XGETBV instruction.
struct XGetBVResult {
uint32_t eax, edx;
};
#if ASMJIT_CC_MSC && !ASMJIT_CC_MSC_GE(15, 0, 30729) && ASMJIT_ARCH_X64
//! \internal
//!
//! HACK: VS2008 or less, 64-bit mode - `__cpuidex` doesn't exist! However,
//! 64-bit calling convention specifies the first parameter to be passed in
//! ECX, so we may be lucky if compiler doesn't move the register, otherwise
//! the result would be wrong.
static ASMJIT_NOINLINE void x86CallCpuIdWorkaround(uint32_t inEcx, uint32_t inEax, CpuIdResult* result) noexcept {
__cpuid(reinterpret_cast<int*>(result), inEax);
}
#endif
//! \internal
//!
//! Wrapper to call `cpuid` instruction.
static void ASMJIT_INLINE x86CallCpuId(CpuIdResult* result, uint32_t inEax, uint32_t inEcx = 0) noexcept {
#if ASMJIT_CC_MSC && ASMJIT_CC_MSC_GE(15, 0, 30729)
__cpuidex(reinterpret_cast<int*>(result), inEax, inEcx);
#elif ASMJIT_CC_MSC && ASMJIT_ARCH_X64
x86CallCpuIdWorkaround(inEcx, inEax, result);
#elif ASMJIT_CC_MSC && ASMJIT_ARCH_X86
uint32_t paramEax = inEax;
uint32_t paramEcx = inEcx;
uint32_t* out = reinterpret_cast<uint32_t*>(result);
__asm {
mov eax, paramEax
mov ecx, paramEcx
mov edi, out
cpuid
mov dword ptr[edi + 0], eax
mov dword ptr[edi + 4], ebx
mov dword ptr[edi + 8], ecx
mov dword ptr[edi + 12], edx
}
#elif (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) && ASMJIT_ARCH_X86
__asm__ __volatile__(
"mov %%ebx, %%edi\n"
"cpuid\n"
"xchg %%edi, %%ebx\n"
: "=a"(result->eax),
"=D"(result->ebx),
"=c"(result->ecx),
"=d"(result->edx)
: "a"(inEax),
"c"(inEcx)
);
#elif (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) && ASMJIT_ARCH_X64
__asm__ __volatile__( \
"mov %%rbx, %%rdi\n"
"cpuid\n"
"xchg %%rdi, %%rbx\n"
: "=a"(result->eax),
"=D"(result->ebx),
"=c"(result->ecx),
"=d"(result->edx)
: "a"(inEax),
"c"(inEcx)
);
#else
# error "[asmjit] x86CallCpuid() - Unsupported compiler."
#endif
}
//! \internal
//!
//! Wrapper to call `xgetbv` instruction.
static void x86CallXGetBV(XGetBVResult* result, uint32_t inEcx) noexcept {
#if ASMJIT_CC_MSC_GE(16, 0, 40219) // 2010SP1+
uint64_t value = _xgetbv(inEcx);
result->eax = static_cast<uint32_t>(value & 0xFFFFFFFFU);
result->edx = static_cast<uint32_t>(value >> 32);
#elif ASMJIT_CC_GCC || ASMJIT_CC_CLANG
uint32_t outEax;
uint32_t outEdx;
// Replaced, because the world is not perfect:
// __asm__ __volatile__("xgetbv" : "=a"(outEax), "=d"(outEdx) : "c"(inEcx));
__asm__ __volatile__(".byte 0x0F, 0x01, 0xd0" : "=a"(outEax), "=d"(outEdx) : "c"(inEcx));
result->eax = outEax;
result->edx = outEdx;
#else
result->eax = 0;
result->edx = 0;
#endif
}
//! \internal
//!
//! Map a 12-byte vendor string returned by `cpuid` into a `CpuInfo::Vendor` ID.
static uint32_t x86GetCpuVendorID(const char* vendorString) noexcept {
struct VendorData {
uint32_t id;
char text[12];
};
static const VendorData vendorList[] = {
{ CpuInfo::kVendorIntel , { 'G', 'e', 'n', 'u', 'i', 'n', 'e', 'I', 'n', 't', 'e', 'l' } },
{ CpuInfo::kVendorAMD , { 'A', 'u', 't', 'h', 'e', 'n', 't', 'i', 'c', 'A', 'M', 'D' } },
{ CpuInfo::kVendorVIA , { 'V', 'I', 'A', 0 , 'V', 'I', 'A', 0 , 'V', 'I', 'A', 0 } },
{ CpuInfo::kVendorVIA , { 'C', 'e', 'n', 't', 'a', 'u', 'r', 'H', 'a', 'u', 'l', 's' } }
};
uint32_t dw0 = reinterpret_cast<const uint32_t*>(vendorString)[0];
uint32_t dw1 = reinterpret_cast<const uint32_t*>(vendorString)[1];
uint32_t dw2 = reinterpret_cast<const uint32_t*>(vendorString)[2];
for (uint32_t i = 0; i < ASMJIT_ARRAY_SIZE(vendorList); i++) {
if (dw0 == reinterpret_cast<const uint32_t*>(vendorList[i].text)[0] &&
dw1 == reinterpret_cast<const uint32_t*>(vendorList[i].text)[1] &&
dw2 == reinterpret_cast<const uint32_t*>(vendorList[i].text)[2])
return vendorList[i].id;
}
return CpuInfo::kVendorNone;
}
static ASMJIT_INLINE void x86SimplifyBrandString(char* s) noexcept {
// Used to always clear the current character to ensure that the result
// doesn't contain garbage after the new zero terminator.
char* d = s;
char prev = 0;
char curr = s[0];
s[0] = '\0';
for (;;) {
if (curr == 0)
break;
if (curr == ' ') {
if (prev == '@' || s[1] == ' ' || s[1] == '@')
goto L_Skip;
}
d[0] = curr;
d++;
prev = curr;
L_Skip:
curr = *++s;
s[0] = '\0';
}
d[0] = '\0';
}
static void x86DetectCpuInfo(CpuInfo* cpuInfo) noexcept {
uint32_t i, maxId;
CpuIdResult regs;
XGetBVResult xcr0 = { 0, 0 };
// Architecture is known at compile-time.
cpuInfo->setArch(ASMJIT_ARCH_X86 ? kArchX86 : kArchX64);
// --------------------------------------------------------------------------
// [CPUID EAX=0x0]
// --------------------------------------------------------------------------
// Get vendor string/id.
x86CallCpuId(&regs, 0x0);
maxId = regs.eax;
::memcpy(cpuInfo->_vendorString + 0, &regs.ebx, 4);
::memcpy(cpuInfo->_vendorString + 4, &regs.edx, 4);
::memcpy(cpuInfo->_vendorString + 8, &regs.ecx, 4);
cpuInfo->_vendorId = x86GetCpuVendorID(cpuInfo->_vendorString);
// --------------------------------------------------------------------------
// [CPUID EAX=0x1]
// --------------------------------------------------------------------------
if (maxId >= 0x1) {
// Get feature flags in ECX/EDX and family/model in EAX.
x86CallCpuId(&regs, 0x1);
// Fill family and model fields.
cpuInfo->_family = (regs.eax >> 8) & 0x0F;
cpuInfo->_model = (regs.eax >> 4) & 0x0F;
cpuInfo->_stepping = (regs.eax ) & 0x0F;
// Use extended family and model fields.
if (cpuInfo->_family == 0x0F) {
cpuInfo->_family += ((regs.eax >> 20) & 0xFF);
cpuInfo->_model += ((regs.eax >> 16) & 0x0F) << 4;
}
cpuInfo->_x86Data._processorType = ((regs.eax >> 12) & 0x03);
cpuInfo->_x86Data._brandIndex = ((regs.ebx ) & 0xFF);
cpuInfo->_x86Data._flushCacheLineSize = ((regs.ebx >> 8) & 0xFF) * 8;
cpuInfo->_x86Data._maxLogicalProcessors = ((regs.ebx >> 16) & 0xFF);
if (regs.ecx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE3);
if (regs.ecx & 0x00000002U) cpuInfo->addFeature(CpuInfo::kX86FeaturePCLMULQDQ);
if (regs.ecx & 0x00000008U) cpuInfo->addFeature(CpuInfo::kX86FeatureMONITOR);
if (regs.ecx & 0x00000200U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSSE3);
if (regs.ecx & 0x00002000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCMPXCHG16B);
if (regs.ecx & 0x00080000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE4_1);
if (regs.ecx & 0x00100000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE4_2);
if (regs.ecx & 0x00400000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMOVBE);
if (regs.ecx & 0x00800000U) cpuInfo->addFeature(CpuInfo::kX86FeaturePOPCNT);
if (regs.ecx & 0x02000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAESNI);
if (regs.ecx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureXSAVE);
if (regs.ecx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureXSAVE_OS);
if (regs.ecx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDRAND);
if (regs.edx & 0x00000010U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDTSC);
if (regs.edx & 0x00000100U) cpuInfo->addFeature(CpuInfo::kX86FeatureCMPXCHG8B);
if (regs.edx & 0x00008000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCMOV);
if (regs.edx & 0x00080000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLFLUSH);
if (regs.edx & 0x00800000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMMX);
if (regs.edx & 0x01000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFXSR);
if (regs.edx & 0x02000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE)
.addFeature(CpuInfo::kX86FeatureMMX2);
if (regs.edx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE)
.addFeature(CpuInfo::kX86FeatureSSE2);
if (regs.edx & 0x10000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMT);
// AMD sets multi-threading ON if it has two or more cores.
if (cpuInfo->_hwThreadsCount == 1 && cpuInfo->_vendorId == CpuInfo::kVendorAMD && (regs.edx & 0x10000000U))
cpuInfo->_hwThreadsCount = 2;
// Get the content of XCR0 if supported by CPU and enabled by OS.
if ((regs.ecx & 0x0C000000U) == 0x0C000000U)
x86CallXGetBV(&xcr0, 0);
// Detect AVX+.
if (regs.ecx & 0x10000000U) {
// - XCR0[2:1] == 11b
// XMM & YMM states need to be enabled by OS.
if ((xcr0.eax & 0x00000006U) == 0x00000006U) {
cpuInfo->addFeature(CpuInfo::kX86FeatureAVX);
if (regs.ecx & 0x00004000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFMA3);
if (regs.ecx & 0x20000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureF16C);
}
}
}
// --------------------------------------------------------------------------
// [CPUID EAX=0x7 ECX=0x0]
// --------------------------------------------------------------------------
// Detect new features if the processor supports CPUID-07.
bool maybeMPX = false;
if (maxId >= 0x7) {
x86CallCpuId(&regs, 0x7);
if (regs.ebx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureFSGSBASE);
if (regs.ebx & 0x00000008U) cpuInfo->addFeature(CpuInfo::kX86FeatureBMI);
if (regs.ebx & 0x00000010U) cpuInfo->addFeature(CpuInfo::kX86FeatureHLE);
if (regs.ebx & 0x00000080U) cpuInfo->addFeature(CpuInfo::kX86FeatureSMEP);
if (regs.ebx & 0x00000100U) cpuInfo->addFeature(CpuInfo::kX86FeatureBMI2);
if (regs.ebx & 0x00000200U) cpuInfo->addFeature(CpuInfo::kX86FeatureERMS);
if (regs.ebx & 0x00000800U) cpuInfo->addFeature(CpuInfo::kX86FeatureRTM);
if (regs.ebx & 0x00004000U) maybeMPX = true;
if (regs.ebx & 0x00040000U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDSEED);
if (regs.ebx & 0x00080000U) cpuInfo->addFeature(CpuInfo::kX86FeatureADX);
if (regs.ebx & 0x00100000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSMAP);
if (regs.ebx & 0x00400000U) cpuInfo->addFeature(CpuInfo::kX86FeaturePCOMMIT);
if (regs.ebx & 0x00800000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLFLUSH_OPT);
if (regs.ebx & 0x01000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLWB);
if (regs.ebx & 0x20000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSHA);
if (regs.ecx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeaturePREFETCHWT1);
// Detect AVX2.
if (cpuInfo->hasFeature(CpuInfo::kX86FeatureAVX))
if (regs.ebx & 0x00000020U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX2);
// Detect AVX-512+.
if (regs.ebx & 0x00010000U) {
// - XCR0[2:1] == 11b
// XMM/YMM states need to be enabled by OS.
// - XCR0[7:5] == 111b
// Upper 256 bits of ZMM0-ZMM15 and ZMM16-ZMM31 need to be enabled by the OS.
if ((xcr0.eax & 0x000000E6U) == 0x000000E6U) {
cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512F);
if (regs.ebx & 0x00020000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512DQ);
if (regs.ebx & 0x00200000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512IFMA);
if (regs.ebx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512PF);
if (regs.ebx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512ER);
if (regs.ebx & 0x10000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512CD);
if (regs.ebx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512BW);
if (regs.ebx & 0x80000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512VL);
if (regs.ecx & 0x00000002U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512VBMI);
}
}
}
// --------------------------------------------------------------------------
// [CPUID EAX=0xD, ECX=0x0]
// --------------------------------------------------------------------------
if (maxId >= 0xD && maybeMPX) {
x86CallCpuId(&regs, 0xD);
// Both the CPUID bits and the XCR0 bits have to be set for MPX to be supported.
if (((regs.eax & xcr0.eax) & 0x00000018U) == 0x00000018U) {
cpuInfo->addFeature(CpuInfo::kX86FeatureMPX);
}
}
// --------------------------------------------------------------------------
// [CPUID EAX=0x80000000...maxId]
// --------------------------------------------------------------------------
// Several CPUID calls are required to get the whole brand string. It's easier
// to copy it one DWORD at a time than to perform a byte-by-byte copy.
uint32_t* brand = reinterpret_cast<uint32_t*>(cpuInfo->_brandString);
i = maxId = 0x80000000U;
do {
x86CallCpuId(&regs, i);
switch (i) {
case 0x80000000U:
maxId = Utils::iMin<uint32_t>(regs.eax, 0x80000004);
break;
case 0x80000001U:
if (regs.ecx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureLAHF_SAHF);
if (regs.ecx & 0x00000020U) cpuInfo->addFeature(CpuInfo::kX86FeatureLZCNT);
if (regs.ecx & 0x00000040U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE4A);
if (regs.ecx & 0x00000080U) cpuInfo->addFeature(CpuInfo::kX86FeatureMSSE);
if (regs.ecx & 0x00000100U) cpuInfo->addFeature(CpuInfo::kX86FeaturePREFETCH);
if (regs.ecx & 0x00200000U) cpuInfo->addFeature(CpuInfo::kX86FeatureTBM);
if (regs.edx & 0x00100000U) cpuInfo->addFeature(CpuInfo::kX86FeatureNX);
if (regs.edx & 0x00200000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFXSR_OPT);
if (regs.edx & 0x00400000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMMX2);
if (regs.edx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDTSCP);
if (regs.edx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86Feature3DNOW2)
.addFeature(CpuInfo::kX86FeatureMMX2);
if (regs.edx & 0x80000000U) cpuInfo->addFeature(CpuInfo::kX86Feature3DNOW);
if (cpuInfo->hasFeature(CpuInfo::kX86FeatureAVX)) {
if (regs.ecx & 0x00000800U) cpuInfo->addFeature(CpuInfo::kX86FeatureXOP);
if (regs.ecx & 0x00010000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFMA4);
}
break;
case 0x80000002U:
case 0x80000003U:
case 0x80000004U:
*brand++ = regs.eax;
*brand++ = regs.ebx;
*brand++ = regs.ecx;
*brand++ = regs.edx;
break;
default:
// Stop the loop, additional features can be detected in the future.
i = maxId;
break;
}
} while (i++ < maxId);
// Simplify CPU brand string by removing unnecessary spaces.
x86SimplifyBrandString(cpuInfo->_brandString);
}
#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
// ============================================================================
// [asmjit::CpuInfo - Detect - HWThreadsCount]
// ============================================================================
static uint32_t cpuDetectHWThreadsCount() noexcept {
#if ASMJIT_OS_WINDOWS
SYSTEM_INFO info;
::GetSystemInfo(&info);
return info.dwNumberOfProcessors;
#elif ASMJIT_OS_POSIX && defined(_SC_NPROCESSORS_ONLN)
long res = ::sysconf(_SC_NPROCESSORS_ONLN);
if (res <= 0) return 1;
return static_cast<uint32_t>(res);
#else
return 1;
#endif
}
// ============================================================================
// [asmjit::CpuInfo - Detect]
// ============================================================================
void CpuInfo::detect() noexcept {
reset();
// Detect the number of hardware threads available.
_hwThreadsCount = cpuDetectHWThreadsCount();
#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
armDetectCpuInfo(this);
#endif // ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
x86DetectCpuInfo(this);
#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
}
// ============================================================================
// [asmjit::CpuInfo - GetHost]
// ============================================================================
struct HostCpuInfo : public CpuInfo {
ASMJIT_INLINE HostCpuInfo() noexcept : CpuInfo() { detect(); }
};
const CpuInfo& CpuInfo::getHost() noexcept {
static HostCpuInfo host;
return host;
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
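
Client code normally goes through CpuInfo::getHost() rather than running detect() itself; a minimal sketch using only members shown in this commit:

#include "AsmJit/base/cpuinfo.h"

// True when the host CPU reports SSE4.1. getHost() runs detect() once and
// caches the result in a function-local static (see HostCpuInfo above).
static bool hostHasSse41() {
  const asmjit::CpuInfo& cpu = asmjit::CpuInfo::getHost();
  return cpu.hasFeature(asmjit::CpuInfo::kX86FeatureSSE4_1);
}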

View File

@@ -0,0 +1,316 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_CPUINFO_H
#define _ASMJIT_BASE_CPUINFO_H
// [Dependencies]
#include "../base/globals.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::CpuInfo]
// ============================================================================
//! CPU information.
class CpuInfo {
public:
// --------------------------------------------------------------------------
// [Vendor]
// --------------------------------------------------------------------------
//! CPU vendor ID.
ASMJIT_ENUM(Vendor) {
kVendorNone = 0, //!< Generic or unknown.
kVendorIntel = 1, //!< Intel vendor.
kVendorAMD = 2, //!< AMD vendor.
kVendorVIA = 3 //!< VIA vendor.
};
// --------------------------------------------------------------------------
// [ArmFeatures]
// --------------------------------------------------------------------------
//! ARM/ARM64 CPU features.
ASMJIT_ENUM(ArmFeatures) {
kArmFeatureV6, //!< ARMv6 instruction set.
kArmFeatureV7, //!< ARMv7 instruction set.
kArmFeatureV8, //!< ARMv8 instruction set.
kArmFeatureTHUMB, //!< CPU provides THUMB v1 instruction set (ARM only).
kArmFeatureTHUMB2, //!< CPU provides THUMB v2 instruction set (ARM only).
kArmFeatureVFP2, //!< CPU provides VFPv2 instruction set.
kArmFeatureVFP3, //!< CPU provides VFPv3 instruction set.
kArmFeatureVFP4, //!< CPU provides VFPv4 instruction set.
kArmFeatureVFP_D32, //!< CPU provides 32 VFP-D (64-bit) registers.
kArmFeatureNEON, //!< CPU provides NEON instruction set.
kArmFeatureDSP, //!< CPU provides DSP extensions.
kArmFeatureIDIV, //!< CPU provides hardware support for SDIV and UDIV.
kArmFeatureAES, //!< CPU provides AES instructions (ARM64 only).
kArmFeatureCRC32, //!< CPU provides CRC32 instructions (ARM64 only).
kArmFeaturePMULL, //!< CPU provides PMULL instructions (ARM64 only).
kArmFeatureSHA1, //!< CPU provides SHA1 instructions (ARM64 only).
kArmFeatureSHA256, //!< CPU provides SHA256 instructions (ARM64 only).
kArmFeatureAtomics64, //!< CPU provides 64-bit load/store atomics (ARM64 only).
kArmFeaturesCount //!< Count of ARM/ARM64 CPU features.
};
// --------------------------------------------------------------------------
// [X86Features]
// --------------------------------------------------------------------------
//! X86/X64 CPU features.
ASMJIT_ENUM(X86Features) {
kX86FeatureNX = 0, //!< CPU has Not-Execute-Bit.
kX86FeatureMT, //!< CPU has multi-threading.
kX86FeatureRDTSC, //!< CPU has RDTSC.
kX86FeatureRDTSCP, //!< CPU has RDTSCP.
kX86FeatureCMOV, //!< CPU has CMOV.
kX86FeatureCMPXCHG8B, //!< CPU has CMPXCHG8B.
kX86FeatureCMPXCHG16B, //!< CPU has CMPXCHG16B (x64).
kX86FeatureCLFLUSH,          //!< CPU has CLFLUSH.
kX86FeatureCLFLUSH_OPT,      //!< CPU has CLFLUSH (optimized).
kX86FeatureCLWB, //!< CPU has CLWB.
kX86FeaturePCOMMIT, //!< CPU has PCOMMIT.
kX86FeaturePREFETCH, //!< CPU has PREFETCH.
kX86FeaturePREFETCHWT1, //!< CPU has PREFETCHWT1.
kX86FeatureLAHF_SAHF, //!< CPU has LAHF/SAHF.
kX86FeatureFXSR, //!< CPU has FXSAVE/FXRSTOR.
kX86FeatureFXSR_OPT, //!< CPU has FXSAVE/FXRSTOR (optimized).
kX86FeatureMMX, //!< CPU has MMX.
kX86FeatureMMX2, //!< CPU has extended MMX.
kX86Feature3DNOW, //!< CPU has 3dNow!
kX86Feature3DNOW2, //!< CPU has enhanced 3dNow!
kX86FeatureSSE, //!< CPU has SSE.
kX86FeatureSSE2, //!< CPU has SSE2.
kX86FeatureSSE3, //!< CPU has SSE3.
kX86FeatureSSSE3, //!< CPU has SSSE3.
kX86FeatureSSE4A, //!< CPU has SSE4.A.
kX86FeatureSSE4_1, //!< CPU has SSE4.1.
kX86FeatureSSE4_2, //!< CPU has SSE4.2.
kX86FeatureMSSE, //!< CPU has Misaligned SSE (MSSE).
kX86FeatureMONITOR, //!< CPU has MONITOR and MWAIT.
kX86FeatureMOVBE, //!< CPU has MOVBE.
kX86FeaturePOPCNT, //!< CPU has POPCNT.
kX86FeatureLZCNT, //!< CPU has LZCNT.
kX86FeatureAESNI, //!< CPU has AESNI.
kX86FeaturePCLMULQDQ, //!< CPU has PCLMULQDQ.
kX86FeatureRDRAND, //!< CPU has RDRAND.
kX86FeatureRDSEED, //!< CPU has RDSEED.
kX86FeatureSMAP, //!< CPU has SMAP (supervisor-mode access prevention).
kX86FeatureSMEP, //!< CPU has SMEP (supervisor-mode execution prevention).
kX86FeatureSHA, //!< CPU has SHA-1 and SHA-256.
kX86FeatureXSAVE, //!< CPU has XSAVE support - XSAVE/XRSTOR, XSETBV/XGETBV, and XCR0.
kX86FeatureXSAVE_OS, //!< OS has enabled XSAVE, you can call XGETBV to get value of XCR0.
kX86FeatureAVX, //!< CPU has AVX.
kX86FeatureAVX2, //!< CPU has AVX2.
kX86FeatureF16C, //!< CPU has F16C.
kX86FeatureFMA3, //!< CPU has FMA3.
kX86FeatureFMA4, //!< CPU has FMA4.
kX86FeatureXOP, //!< CPU has XOP.
kX86FeatureBMI, //!< CPU has BMI (bit manipulation instructions #1).
kX86FeatureBMI2, //!< CPU has BMI2 (bit manipulation instructions #2).
kX86FeatureADX, //!< CPU has ADX (multi-precision add-carry instruction extensions).
kX86FeatureTBM, //!< CPU has TBM (trailing bit manipulation).
kX86FeatureMPX, //!< CPU has MPX (memory protection extensions).
kX86FeatureHLE, //!< CPU has HLE.
kX86FeatureRTM, //!< CPU has RTM.
kX86FeatureERMS, //!< CPU has ERMS (enhanced REP MOVSB/STOSB).
kX86FeatureFSGSBASE, //!< CPU has FSGSBASE.
kX86FeatureAVX512F, //!< CPU has AVX-512F (foundation).
kX86FeatureAVX512CD, //!< CPU has AVX-512CD (conflict detection).
kX86FeatureAVX512PF, //!< CPU has AVX-512PF (prefetch instructions).
kX86FeatureAVX512ER, //!< CPU has AVX-512ER (exponential and reciprocal instructions).
kX86FeatureAVX512DQ, //!< CPU has AVX-512DQ (DWORD/QWORD).
kX86FeatureAVX512BW, //!< CPU has AVX-512BW (BYTE/WORD).
kX86FeatureAVX512VL,         //!< CPU has AVX-512VL (vector length extensions).
kX86FeatureAVX512IFMA,       //!< CPU has AVX-512IFMA (integer fused multiply-add using 52-bit precision).
kX86FeatureAVX512VBMI,       //!< CPU has AVX-512VBMI (vector byte manipulation instructions).
kX86FeaturesCount //!< Count of X86/X64 CPU features.
};
// --------------------------------------------------------------------------
// [Other]
// --------------------------------------------------------------------------
//! \internal
enum {
kFeaturesPerUInt32 = static_cast<int>(sizeof(uint32_t)) * 8
};
// --------------------------------------------------------------------------
// [ArmInfo]
// --------------------------------------------------------------------------
struct ArmData {
};
// --------------------------------------------------------------------------
// [X86Info]
// --------------------------------------------------------------------------
struct X86Data {
uint32_t _processorType; //!< Processor type.
uint32_t _brandIndex; //!< Brand index.
uint32_t _flushCacheLineSize; //!< Flush cache line size (in bytes).
uint32_t _maxLogicalProcessors; //!< Maximum number of addressable IDs for logical processors.
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_INLINE CpuInfo() noexcept { reset(); }
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
ASMJIT_INLINE void reset() noexcept { ::memset(this, 0, sizeof(CpuInfo)); }
// --------------------------------------------------------------------------
// [Detect]
// --------------------------------------------------------------------------
ASMJIT_API void detect() noexcept;
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get CPU architecture, see \ref ArchId.
ASMJIT_INLINE uint32_t getArch() const noexcept { return _arch; }
//! Set CPU architecture, see \ref ArchId.
ASMJIT_INLINE void setArch(uint32_t arch) noexcept { _arch = static_cast<uint8_t>(arch); }
//! Get CPU vendor string.
ASMJIT_INLINE const char* getVendorString() const noexcept { return _vendorString; }
//! Get CPU brand string.
ASMJIT_INLINE const char* getBrandString() const noexcept { return _brandString; }
//! Get CPU vendor ID.
ASMJIT_INLINE uint32_t getVendorId() const noexcept { return _vendorId; }
//! Get CPU family ID.
ASMJIT_INLINE uint32_t getFamily() const noexcept { return _family; }
//! Get CPU model ID.
ASMJIT_INLINE uint32_t getModel() const noexcept { return _model; }
//! Get CPU stepping.
ASMJIT_INLINE uint32_t getStepping() const noexcept { return _stepping; }
//! Get number of hardware threads available.
ASMJIT_INLINE uint32_t getHwThreadsCount() const noexcept {
return _hwThreadsCount;
}
//! Get whether CPU has a `feature`.
ASMJIT_INLINE bool hasFeature(uint32_t feature) const noexcept {
ASMJIT_ASSERT(feature < sizeof(_features) * 8);
uint32_t pos = feature / kFeaturesPerUInt32;
uint32_t bit = feature % kFeaturesPerUInt32;
return static_cast<bool>((_features[pos] >> bit) & 0x1);
}
//! Add a CPU `feature`.
ASMJIT_INLINE CpuInfo& addFeature(uint32_t feature) noexcept {
ASMJIT_ASSERT(feature < sizeof(_features) * 8);
uint32_t pos = feature / kFeaturesPerUInt32;
uint32_t bit = feature % kFeaturesPerUInt32;
_features[pos] |= static_cast<uint32_t>(1) << bit;
return *this;
}
// --------------------------------------------------------------------------
// [Accessors - ARM]
// --------------------------------------------------------------------------
// --------------------------------------------------------------------------
// [Accessors - X86]
// --------------------------------------------------------------------------
//! Get processor type.
ASMJIT_INLINE uint32_t getX86ProcessorType() const noexcept {
return _x86Data._processorType;
}
//! Get brand index.
ASMJIT_INLINE uint32_t getX86BrandIndex() const noexcept {
return _x86Data._brandIndex;
}
//! Get flush cache line size.
ASMJIT_INLINE uint32_t getX86FlushCacheLineSize() const noexcept {
return _x86Data._flushCacheLineSize;
}
//! Get maximum logical processors count.
ASMJIT_INLINE uint32_t getX86MaxLogicalProcessors() const noexcept {
return _x86Data._maxLogicalProcessors;
}
// --------------------------------------------------------------------------
// [Statics]
// --------------------------------------------------------------------------
//! Get the host CPU information.
static ASMJIT_API const CpuInfo& getHost() noexcept;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! CPU vendor string.
char _vendorString[16];
//! CPU brand string.
char _brandString[64];
//! CPU architecture, see \ref Arch.
uint8_t _arch;
//! \internal
uint8_t _reserved[3];
//! CPU vendor id, see \ref CpuVendor.
uint32_t _vendorId;
//! CPU family ID.
uint32_t _family;
//! CPU model ID.
uint32_t _model;
//! CPU stepping.
uint32_t _stepping;
//! Number of hardware threads.
uint32_t _hwThreadsCount;
//! CPU features (bit-array).
uint32_t _features[8];
// Architecture specific data.
union {
ArmData _armData;
X86Data _x86Data;
};
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_CPUINFO_H
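
A small debug-dump sketch built only from the accessors above; the printf formatting is ours, not part of AsmJit:

#include <cstdio>
#include "AsmJit/base/cpuinfo.h"

// Prints vendor, brand, family/model/stepping, and hardware thread count of
// the host, e.g. when diagnosing a misbehaving detour on an unusual machine.
static void printHostCpu() {
  const asmjit::CpuInfo& cpu = asmjit::CpuInfo::getHost();
  std::printf("%s / %s, family %u, model %u, stepping %u, %u hw threads\n",
              cpu.getVendorString(), cpu.getBrandString(),
              cpu.getFamily(), cpu.getModel(), cpu.getStepping(),
              cpu.getHwThreadsCount());
}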

View File

@@ -0,0 +1,94 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/globals.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::DebugUtils]
// ============================================================================
#if !defined(ASMJIT_DISABLE_TEXT)
static const char errorMessages[] = {
"Ok\0"
"No heap memory\0"
"No virtual memory\0"
"Invalid argument\0"
"Invalid state\0"
"Invalid architecture\0"
"Not initialized\0"
"No code generated\0"
"Code too large\0"
"Label already bound\0"
"Unknown instruction\0"
"Illegal instruction\0"
"Illegal addressing\0"
"Illegal displacement\0"
"Overlapped arguments\0"
"Unknown error\0"
};
static const char* findPackedString(const char* p, uint32_t id, uint32_t maxId) noexcept {
uint32_t i = 0;
if (id > maxId)
id = maxId;
while (i < id) {
while (p[0])
p++;
p++;
i++;
}
return p;
}
#endif // ASMJIT_DISABLE_TEXT
const char* DebugUtils::errorAsString(Error err) noexcept {
#if !defined(ASMJIT_DISABLE_TEXT)
return findPackedString(errorMessages, err, kErrorCount);
#else
static const char noMessage[] = "";
return noMessage;
#endif
}
void DebugUtils::debugOutput(const char* str) noexcept {
#if ASMJIT_OS_WINDOWS
::OutputDebugStringA(str);
#else
::fputs(str, stderr);
#endif
}
void DebugUtils::assertionFailed(const char* file, int line, const char* msg) noexcept {
char str[1024];
snprintf(str, 1024,
"[asmjit] Assertion failed at %s (line %d):\n"
"[asmjit] %s\n", file, line, msg);
// Support buggy `snprintf` implementations.
str[1023] = '\0';
debugOutput(str);
::abort();
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
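
errorAsString() is the piece extension code would use to log a failed JIT step; a hedged sketch, with the Error value assumed to come from whatever assembler or runtime call failed:

#include <cstdio>
#include "AsmJit/base/globals.h"

// Prints a human-readable message for a non-success AsmJit error code.
static void reportJitError(asmjit::Error err) {
  if (err != asmjit::kErrorOk)
    std::fprintf(stderr, "[dhooks] AsmJit error %u: %s\n",
                 err, asmjit::DebugUtils::errorAsString(err));
}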

View File

@@ -0,0 +1,666 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_GLOBALS_H
#define _ASMJIT_BASE_GLOBALS_H
// [Dependencies]
#include "../build.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::TypeDefs]
// ============================================================================
//! AsmJit error core (unsigned integer).
typedef uint32_t Error;
//! 64-bit unsigned pointer, compatible with JIT and non-JIT generators.
//!
//! This is the preferred pointer type to use with AsmJit library. It has a
//! capability to hold any pointer for any architecture making it an ideal
//! candidate for a cross-platform code generator.
typedef uint64_t Ptr;
//! Like \ref Ptr, but signed.
typedef int64_t SignedPtr;
// ============================================================================
// [asmjit::GlobalDefs]
// ============================================================================
//! Invalid index
//!
//! The invalid index is the last possible index value and is never used in
//! practice. In AsmJit it is used exclusively with strings to indicate that
//! the length of the string is not known and has to be determined.
static const size_t kInvalidIndex = ~static_cast<size_t>(0);
//! Invalid base address.
static const Ptr kNoBaseAddress = static_cast<Ptr>(static_cast<SignedPtr>(-1));
//! Global constants.
ASMJIT_ENUM(GlobalDefs) {
//! Invalid value or operand id.
kInvalidValue = 0xFFFFFFFF,
//! Invalid register index.
kInvalidReg = 0xFF,
//! Invalid variable type.
kInvalidVar = 0xFF,
//! Host memory allocator overhead.
//!
//! The overhead is decremented from all zone allocators so the operating
//! system doesn't have to allocate one extra virtual page to keep track of
//! the requested memory block.
//!
//! The number is actually a guess.
kMemAllocOverhead = sizeof(intptr_t) * 4,
//! Memory grow threshold.
//!
//! After the grow threshold is reached the capacity won't be doubled
//! anymore.
kMemAllocGrowMax = 8192 * 1024
};
// ============================================================================
// [asmjit::ArchId]
// ============================================================================
//! CPU architecture identifier.
ASMJIT_ENUM(ArchId) {
//! No/Unknown architecture.
kArchNone = 0,
//! X86 architecture (32-bit).
kArchX86 = 1,
//! X64 architecture (64-bit), also called AMD64.
kArchX64 = 2,
//! X32 architecture (64-bit with 32-bit pointers) (NOT USED ATM).
kArchX32 = 3,
//! Arm architecture (32-bit).
kArchArm32 = 4,
//! Arm64 architecture (64-bit).
kArchArm64 = 5,
#if ASMJIT_ARCH_X86
kArchHost = kArchX86
#elif ASMJIT_ARCH_X64
kArchHost = kArchX64
#elif ASMJIT_ARCH_ARM32
kArchHost = kArchArm32
#elif ASMJIT_ARCH_ARM64
kArchHost = kArchArm64
#else
# error "[asmjit] Unsupported host architecture."
#endif
};
// ============================================================================
// [asmjit::CallConv]
// ============================================================================
//! Function calling convention.
//!
//! A calling convention is a scheme that defines how function arguments are
//! passed and how the return value is handled. In assembler programming it is
//! always necessary to comply with the function's calling convention, because
//! even a small inconsistency can cause undefined behavior or a crash.
//!
//! Platform Independent Conventions
//! --------------------------------
//!
//! - `kCallConvHost` - Should match the current C++ compiler native calling
//! convention.
//!
//! X86/X64 Specific Conventions
//! ----------------------------
//!
//! List of calling conventions for 32-bit x86 mode:
//! - `kCallConvX86CDecl` - Calling convention for C runtime.
//! - `kCallConvX86StdCall` - Calling convention for WinAPI functions.
//! - `kCallConvX86MsThisCall` - Calling convention for C++ members under
//! Windows (produced by MSVC and all MSVC compatible compilers).
//! - `kCallConvX86MsFastCall` - Fastest calling convention that can be used
//! by MSVC compiler.
//! - `kCallConvX86BorlandFastCall` - Borland fastcall convention.
//! - `kCallConvX86GccFastCall` - GCC fastcall convention (2 register arguments).
//! - `kCallConvX86GccRegParm1` - GCC regparm(1) convention.
//! - `kCallConvX86GccRegParm2` - GCC regparm(2) convention.
//! - `kCallConvX86GccRegParm3` - GCC regparm(3) convention.
//!
//! List of calling conventions for 64-bit x86 mode (x64):
//! - `kCallConvX64Win` - Windows 64-bit calling convention (WIN64 ABI).
//! - `kCallConvX64Unix` - Unix 64-bit calling convention (AMD64 ABI).
//!
//! ARM Specific Conventions
//! ------------------------
//!
//! List of ARM calling conventions:
//! - `kCallConvArm32SoftFP` - Legacy calling convention, floating point
//! arguments are passed via GP registers.
//! - `kCallConvArm32HardFP` - Modern calling convention, uses VFP registers
//! to pass floating point arguments.
ASMJIT_ENUM(CallConv) {
//! Calling convention is invalid (can't be used).
kCallConvNone = 0,
// --------------------------------------------------------------------------
// [X86]
// --------------------------------------------------------------------------
//! X86 `__cdecl` calling convention (used by C runtime and libraries).
//!
//! Compatible across MSVC and GCC.
//!
//! Arguments direction:
//! - Right to left.
//!
//! Stack is cleaned by:
//! - Caller.
//!
//! Return value:
//! - Integer types - `eax:edx` registers.
//! - Floating point - `fp0` register.
kCallConvX86CDecl = 1,
//! X86 `__stdcall` calling convention (used mostly by WinAPI).
//!
//! Compatible across MSVC and GCC.
//!
//! Arguments direction:
//! - Right to left.
//!
//! Stack is cleaned by:
//! - Callee.
//!
//! Return value:
//! - Integer types - `eax:edx` registers.
//! - Floating point - `fp0` register.
kCallConvX86StdCall = 2,
//! X86 `__thiscall` calling convention (MSVC/Intel specific).
//!
//! This is MSVC (and Intel) specific calling convention used when targeting
//! Windows platform for C++ class methods. Implicit `this` pointer (defined
//! as the first argument) is stored in `ecx` register instead of storing it
//! on the stack.
//!
//! This calling convention is implicitly used by MSVC for class functions.
//!
//! C++ class functions that have variable number of arguments use `__cdecl`
//! calling convention instead.
//!
//! Arguments direction:
//! - Right to left (except for the first argument passed in `ecx`).
//!
//! Stack is cleaned by:
//! - Callee.
//!
//! Return value:
//! - Integer types - `eax:edx` registers.
//! - Floating point - `fp0` register.
kCallConvX86MsThisCall = 3,
//! X86 `__fastcall` convention (MSVC/Intel specific).
//!
//! The first two arguments (evaluated from the left to the right) are passed
//! in `ecx` and `edx` registers, all others on the stack from the right to
//! the left.
//!
//! Arguments direction:
//! - Right to left (except for the first two integers passed in `ecx` and `edx`).
//!
//! Stack is cleaned by:
//! - Callee.
//!
//! Return value:
//! - Integer types - `eax:edx` registers.
//! - Floating point - `fp0` register.
//!
//! NOTE: This calling convention differs from GCC's one.
kCallConvX86MsFastCall = 4,
//! X86 `__fastcall` convention (Borland specific).
//!
//! The first two arguments (evaluated from the left to the right) are passed
//! in `ecx` and `edx` registers, all others on the stack from the left to
//! the right.
//!
//! Arguments direction:
//! - Left to right (except for the first two integers passed in `ecx` and `edx`).
//!
//! Stack is cleaned by:
//! - Callee.
//!
//! Return value:
//! - Integer types - `eax:edx` registers.
//! - Floating point - `fp0` register.
//!
//! NOTE: Arguments on the stack are passed in left-to-right order, which
//! is really Borland specific, all other `__fastcall` calling conventions
//! use right to left order.
kCallConvX86BorlandFastCall = 5,
//! X86 `__fastcall` convention (GCC specific).
//!
//! The first two arguments (evaluated from the left to the right) are passed
//! in `ecx` and `edx` registers, all others on the stack from the right to
//! the left.
//!
//! Arguments direction:
//! - Right to left (except for the first two integers passed in `ecx` and `edx`).
//!
//! Stack is cleaned by:
//! - Callee.
//!
//! Return value:
//! - Integer types - `eax:edx` registers.
//! - Floating point - `fp0` register.
//!
//! NOTE: This calling convention should be compatible with `kCallConvX86MsFastCall`.
kCallConvX86GccFastCall = 6,
//! X86 `regparm(1)` convention (GCC specific).
//!
//! The first argument (evaluated from the left to the right) is passed in
//! `eax` register, all others on the stack from the right to the left.
//!
//! Arguments direction:
//! - Right to left (except for the first integer passed in `eax`).
//!
//! Stack is cleaned by:
//! - Caller.
//!
//! Return value:
//! - Integer types - `eax:edx` registers.
//! - Floating point - `fp0` register.
kCallConvX86GccRegParm1 = 7,
//! X86 `regparm(2)` convention (GCC specific).
//!
//! The first two arguments (evaluated from the left to the right) are passed
//! in `ecx` and `edx` registers, all others on the stack from the right to
//! the left.
//!
//! Arguments direction:
//! - Right to left (except for the first two integers passed in `ecx` and `edx`).
//!
//! Stack is cleaned by:
//! - Caller.
//!
//! Return value:
//! - Integer types - `eax:edx` registers.
//! - Floating point - `fp0` register.
kCallConvX86GccRegParm2 = 8,
//! X86 `regparm(3)` convention (GCC specific).
//!
//! The first three parameters (evaluated from left to right) are passed in
//! the `eax`, `edx`, and `ecx` registers, all others on the stack from the
//! right to the left.
//!
//! Arguments direction:
//! - Right to left (except for the first three integers passed in `eax`,
//! `edx`, and `ecx`).
//!
//! Stack is cleaned by:
//! - Caller.
//!
//! Return value:
//! - Integer types - `eax:edx` registers.
//! - Floating point - `fp0` register.
kCallConvX86GccRegParm3 = 9,
// --------------------------------------------------------------------------
// [X64]
// --------------------------------------------------------------------------
//! X64 calling convention used by Windows platform (WIN64-ABI).
//!
//! The first 4 arguments are passed in the following registers:
//! - 1. 32/64-bit integer in `rcx` and floating point argument in `xmm0`
//! - 2. 32/64-bit integer in `rdx` and floating point argument in `xmm1`
//! - 3. 32/64-bit integer in `r8` and floating point argument in `xmm2`
//! - 4. 32/64-bit integer in `r9` and floating point argument in `xmm3`
//!
//! If any of the first four arguments doesn't match the list above, its
//! register slot is simply skipped. The WIN64 ABI is very specific about this.
//!
//! All other arguments are pushed on the stack from the right to the left.
//! Stack has to be aligned by 16 bytes, always. There is also a 32-byte
//! shadow space on the stack that can be used to save up to four 64-bit
//! registers.
//!
//! Arguments direction:
//! - Right to left (except for all parameters passed in registers).
//!
//! Stack cleaned by:
//! - Caller.
//!
//! Return value:
//! - Integer types - `rax`.
//! - Floating point - `xmm0`.
//!
//! Stack is always aligned to 16 bytes.
//!
//! More information about this calling convention can be found on MSDN
//! <http://msdn.microsoft.com/en-us/library/9b372w95.aspx>.
kCallConvX64Win = 10,
//! X64 calling convention used by Unix platforms (AMD64-ABI).
//!
//! First six 32 or 64-bit integer arguments are passed in `rdi`, `rsi`,
//! `rdx`, `rcx`, `r8`, and `r9` registers. First eight floating point or xmm
//! arguments are passed in `xmm0`, `xmm1`, `xmm2`, `xmm3`, `xmm4`, `xmm5`,
//! `xmm6`, and `xmm7` registers.
//!
//! There is also a red zone below the stack pointer that can be used by the
//! function. The red zone typically spans [rsp-128] to [rsp-8]; however, the
//! red zone can also be disabled.
//!
//! Arguments direction:
//! - Right to left (except for all arguments passed in registers).
//!
//! Stack cleaned by:
//! - Caller.
//!
//! Return value:
//! - Integer types - `rax`.
//! - Floating point - `xmm0`.
//!
//! Stack is always aligned to 16 bytes.
kCallConvX64Unix = 11,
// --------------------------------------------------------------------------
// [ARM]
// --------------------------------------------------------------------------
kCallConvArm32SoftFP = 16,
kCallConvArm32HardFP = 17,
// --------------------------------------------------------------------------
// [Internal]
// --------------------------------------------------------------------------
//! \internal
_kCallConvX86Start = 1,
//! \internal
_kCallConvX86End = 9,
//! \internal
_kCallConvX64Start = 10,
//! \internal
_kCallConvX64End = 11,
//! \internal
_kCallConvArmStart = 16,
//! \internal
_kCallConvArmEnd = 17,
// --------------------------------------------------------------------------
// [Host]
// --------------------------------------------------------------------------
#if defined(ASMJIT_DOCGEN)
//! Default calling convention based on the current compiler's settings.
//!
//! NOTE: This should always be the same as `kCallConvHostCDecl`, but some
//! compilers allow the default calling convention to be overridden. Such an
//! override is not detected at the moment.
kCallConvHost = DETECTED_AT_COMPILE_TIME,
//! Default C calling convention based on the current compiler's settings.
kCallConvHostCDecl = DETECTED_AT_COMPILE_TIME,
//! Compatibility for `__stdcall` calling convention.
//!
//! NOTE: This enumeration is always set to a value which is compatible with
//! the current compiler's `__stdcall` calling convention. In 64-bit mode
//! there is no such convention and the value is mapped to `kCallConvX64Win`
//! or `kCallConvX64Unix`, depending on the host architecture.
kCallConvHostStdCall = DETECTED_AT_COMPILE_TIME,
//! Compatibility for `__fastcall` calling convention.
//!
//! NOTE: This enumeration is always set to a value which is compatible with
//! the current compiler's `__fastcall` calling convention. In 64-bit mode
//! there is no such convention and the value is mapped to `kCallConvX64Win`
//! or `kCallConvX64Unix`, depending on the host architecture.
kCallConvHostFastCall = DETECTED_AT_COMPILE_TIME
#elif ASMJIT_ARCH_X86
// X86 Host Support.
kCallConvHost = kCallConvX86CDecl,
kCallConvHostCDecl = kCallConvX86CDecl,
kCallConvHostStdCall = kCallConvX86StdCall,
kCallConvHostFastCall =
ASMJIT_CC_MSC ? kCallConvX86MsFastCall :
ASMJIT_CC_GCC ? kCallConvX86GccFastCall :
ASMJIT_CC_CLANG ? kCallConvX86GccFastCall :
ASMJIT_CC_CODEGEAR ? kCallConvX86BorlandFastCall : kCallConvNone
#elif ASMJIT_ARCH_X64
// X64 Host Support.
kCallConvHost = ASMJIT_OS_WINDOWS ? kCallConvX64Win : kCallConvX64Unix,
// These don't exist in 64-bit mode.
kCallConvHostCDecl = kCallConvHost,
kCallConvHostStdCall = kCallConvHost,
kCallConvHostFastCall = kCallConvHost
#elif ASMJIT_ARCH_ARM32
# if defined(__SOFTFP__)
kCallConvHost = kCallConvArm32SoftFP,
# else
kCallConvHost = kCallConvArm32HardFP,
# endif
// These don't exist on ARM.
kCallConvHostCDecl = kCallConvHost,
kCallConvHostStdCall = kCallConvHost,
kCallConvHostFastCall = kCallConvHost
#else
# error "[asmjit] Couldn't determine the target's calling convention."
#endif
};
// ============================================================================
// [asmjit::ErrorCode]
// ============================================================================
//! AsmJit error codes.
ASMJIT_ENUM(ErrorCode) {
//! No error (success).
//!
//! This is the default state and the state you want.
kErrorOk = 0,
//! Heap memory allocation failed.
kErrorNoHeapMemory,
//! Virtual memory allocation failed.
kErrorNoVirtualMemory,
//! Invalid argument.
kErrorInvalidArgument,
//! Invalid state.
kErrorInvalidState,
//! Invalid architecture.
kErrorInvalidArch,
//! The object is not initialized.
kErrorNotInitialized,
//! No code generated.
//!
//! Returned by runtime if the code-generator contains no code.
kErrorNoCodeGenerated,
//! Code generated is too large to fit in memory reserved.
//!
//! Returned by `StaticRuntime` in case the generated code is too large to
//! fit into the memory already reserved for it.
kErrorCodeTooLarge,
//! Label is already bound.
kErrorLabelAlreadyBound,
//! Unknown instruction (an instruction ID is out of bounds or instruction
//! name is invalid).
kErrorUnknownInst,
//! Illegal instruction.
//!
//! This status code can also be returned in X64 mode if AH, BH, CH or DH
//! registers have been used together with a REX prefix. The instruction
//! is not encodable in such a case.
//!
//! Example of raising `kErrorIllegalInst` error.
//!
//! ~~~
//! // Invalid address size.
//! a.mov(dword_ptr(eax), al);
//!
//! // Undecodable instruction - AH used with R10, however R10 can only be
//! // encoded by using REX prefix, which conflicts with AH.
//! a.mov(byte_ptr(r10), ah);
//! ~~~
//!
//! NOTE: In debug mode assertion is raised instead of returning an error.
kErrorIllegalInst,
//! Illegal (unencodable) addressing used.
kErrorIllegalAddresing,
//! Illegal (unencodable) displacement used.
//!
//! X86/X64 Specific
//! ----------------
//!
//! A short-form jump instruction has been used, but the displacement is out
//! of bounds.
kErrorIllegalDisplacement,
//! A variable has been assigned more than once to a function argument (Compiler).
kErrorOverlappedArgs,
//! Count of AsmJit error codes.
kErrorCount
};
//! \}
// ============================================================================
// [asmjit::Init / NoInit]
// ============================================================================
#if !defined(ASMJIT_DOCGEN)
struct _Init {};
static const _Init Init = {};
struct _NoInit {};
static const _NoInit NoInit = {};
#endif // !ASMJIT_DOCGEN
// ============================================================================
// [asmjit::DebugUtils]
// ============================================================================
namespace DebugUtils {
//! Get a printable version of `asmjit::Error` value.
ASMJIT_API const char* errorAsString(Error err) noexcept;
//! \addtogroup asmjit_base
//! \{
//! Called in debug build to output a debugging message caused by assertion
//! failure or tracing.
ASMJIT_API void debugOutput(const char* str) noexcept;
//! Called in debug build on assertion failure.
//!
//! \param file Source file name where it happened.
//! \param line Line in the source file.
//! \param msg Message to display.
//!
//! If you have problems with assertions, put a breakpoint at the
//! assertionFailed() function (asmjit/base/globals.cpp) and check the call
//! stack to locate the failing code.
ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, const char* msg) noexcept;
//! \}
} // DebugUtils namespace
} // asmjit namespace
// ============================================================================
// [ASMJIT_ASSERT]
// ============================================================================
#if defined(ASMJIT_DEBUG)
# define ASMJIT_ASSERT(exp) \
do { \
if (!(exp)) { \
::asmjit::DebugUtils::assertionFailed( \
__FILE__ + ::asmjit::DebugUtils::kSourceRelativePathOffset, \
__LINE__, \
#exp); \
} \
} while (0)
# define ASMJIT_NOT_REACHED() \
::asmjit::DebugUtils::assertionFailed( \
__FILE__ + ::asmjit::DebugUtils::kSourceRelativePathOffset, \
__LINE__, \
"MUST NOT BE REACHED")
#else
# define ASMJIT_ASSERT(exp) ASMJIT_NOP
# define ASMJIT_NOT_REACHED() ASMJIT_ASSUME(0)
#endif // DEBUG
// ============================================================================
// [ASMJIT_PROPAGATE_ERROR]
// ============================================================================
//! \internal
//!
//! Used by AsmJit to return the `_Exp_` result if it's an error.
#define ASMJIT_PROPAGATE_ERROR(_Exp_) \
do { \
::asmjit::Error _errval = (_Exp_); \
if (_errval != ::asmjit::kErrorOk) \
return _errval; \
} while (0)
// ============================================================================
// [asmjit_cast<>]
// ============================================================================
//! \addtogroup asmjit_base
//! \{
//! Cast used to cast a pointer to a function. It's like reinterpret_cast<>,
//! but internally uses a C-style cast to work with MinGW.
//!
//! If you are using a single compiler and `reinterpret_cast<>` works for you,
//! there is no reason to use `asmjit_cast<>`. If you are writing
//! cross-platform software that has to support various compilers, consider
//! using `asmjit_cast<>` instead of `reinterpret_cast<>`.
template<typename T, typename Z>
static ASMJIT_INLINE T asmjit_cast(Z* p) noexcept { return (T)p; }
//! \}
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_GLOBALS_H
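
Two idioms from this header that JIT client code typically leans on, sketched here with hypothetical function names: asmjit_cast<> to turn a code buffer pointer into a callable function pointer, and ASMJIT_PROPAGATE_ERROR to bail out early on failure.

#include "AsmJit/base/globals.h"

typedef int (*BinaryFunc)(int, int);

// Calls code that a runtime placed at `codePtr`; asmjit_cast<> is a plain
// C-style cast wrapper, so this also compiles cleanly under MinGW.
static int callGenerated(void* codePtr, int a, int b) {
  BinaryFunc fn = asmjit::asmjit_cast<BinaryFunc>(codePtr);
  return fn(a, b);
}

// Returns early with the failing code, exactly as the macro documents.
static asmjit::Error buildStub(asmjit::Error stepResult) {
  ASMJIT_PROPAGATE_ERROR(stepResult);
  return asmjit::kErrorOk;
}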

View File

@@ -0,0 +1,20 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/hlstream.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
} // asmjit namespace
// [Api-End]
#include "../apiend.h"

File diff suppressed because it is too large

View File

@ -0,0 +1,194 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../build.h"
#if !defined(ASMJIT_DISABLE_LOGGER)
// [Dependencies]
#include "../base/containers.h"
#include "../base/logger.h"
#include "../base/utils.h"
#include <stdarg.h>
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::LogUtil]
// ============================================================================
bool LogUtil::formatLine(StringBuilder& sb, const uint8_t* binData, size_t binLen, size_t dispLen, size_t imLen, const char* comment) noexcept {
size_t currentLen = sb.getLength();
size_t commentLen = comment ? Utils::strLen(comment, kMaxCommentLength) : 0;
ASMJIT_ASSERT(binLen >= dispLen);
if ((binLen != 0 && binLen != kInvalidIndex) || commentLen) {
size_t align = kMaxInstLength;
char sep = ';';
for (size_t i = (binLen == kInvalidIndex); i < 2; i++) {
size_t begin = sb.getLength();
// Append align.
if (currentLen < align) {
if (!sb.appendChars(' ', align - currentLen))
return false;
}
// Append separator.
if (sep) {
if (!(sb.appendChar(sep) & sb.appendChar(' ')))
return false;
}
// Append binary data or comment.
if (i == 0) {
if (!sb.appendHex(binData, binLen - dispLen - imLen))
return false;
if (!sb.appendChars('.', dispLen * 2))
return false;
if (!sb.appendHex(binData + binLen - imLen, imLen))
return false;
if (commentLen == 0)
break;
}
else {
if (!sb.appendString(comment, commentLen))
return false;
}
currentLen += sb.getLength() - begin;
align += kMaxBinaryLength;
sep = '|';
}
}
return sb.appendChar('\n');
}
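// Roughly, the resulting line looks like this (the instruction text is already
// in `sb` when formatLine() is called; column positions follow kMaxInstLength
// and kMaxBinaryLength and are only approximate here):
//
//   lea eax, [ebp-8]                        ; 8D45F8                    | illustrative comment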
// ============================================================================
// [asmjit::Logger - Construction / Destruction]
// ============================================================================
Logger::Logger() noexcept {
_options = 0;
::memset(_indentation, 0, ASMJIT_ARRAY_SIZE(_indentation));
}
Logger::~Logger() noexcept {}
// ============================================================================
// [asmjit::Logger - Logging]
// ============================================================================
void Logger::logFormat(uint32_t style, const char* fmt, ...) noexcept {
char buf[1024];
size_t len;
va_list ap;
va_start(ap, fmt);
len = vsnprintf(buf, sizeof(buf), fmt, ap);
va_end(ap);
if (len >= sizeof(buf))
len = sizeof(buf) - 1;
logString(style, buf, len);
}
void Logger::logBinary(uint32_t style, const void* data, size_t size) noexcept {
static const char prefix[] = ".data ";
static const char hex[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' };
const uint8_t* s = static_cast<const uint8_t*>(data);
size_t i = size;
char buffer[128];
::memcpy(buffer, prefix, ASMJIT_ARRAY_SIZE(prefix) - 1);
while (i) {
uint32_t n = static_cast<uint32_t>(Utils::iMin<size_t>(i, 16));
char* p = buffer + ASMJIT_ARRAY_SIZE(prefix) - 1;
i -= n;
do {
uint32_t c = s[0];
p[0] = hex[c >> 4];
p[1] = hex[c & 15];
p += 2;
s += 1;
} while (--n);
*p++ = '\n';
logString(style, buffer, (size_t)(p - buffer));
}
}
// ============================================================================
// [asmjit::Logger - Indentation]
// ============================================================================
void Logger::setIndentation(const char* indentation) noexcept {
::memset(_indentation, 0, ASMJIT_ARRAY_SIZE(_indentation));
if (!indentation)
return;
size_t length = Utils::strLen(indentation, ASMJIT_ARRAY_SIZE(_indentation) - 1);
::memcpy(_indentation, indentation, length);
}
// ============================================================================
// [asmjit::FileLogger - Construction / Destruction]
// ============================================================================
FileLogger::FileLogger(FILE* stream) noexcept : _stream(nullptr) { setStream(stream); }
FileLogger::~FileLogger() noexcept {}
// ============================================================================
// [asmjit::FileLogger - Logging]
// ============================================================================
void FileLogger::logString(uint32_t style, const char* buf, size_t len) noexcept {
if (!_stream)
return;
if (len == kInvalidIndex)
len = strlen(buf);
fwrite(buf, 1, len, _stream);
}
// ============================================================================
// [asmjit::StringLogger - Construction / Destruction]
// ============================================================================
StringLogger::StringLogger() noexcept {}
StringLogger::~StringLogger() noexcept {}
// ============================================================================
// [asmjit::StringLogger - Logging]
// ============================================================================
void StringLogger::logString(uint32_t style, const char* buf, size_t len) noexcept {
_stringBuilder.appendString(buf, len);
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_LOGGER

View File

@ -0,0 +1,268 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_LOGGER_H
#define _ASMJIT_BASE_LOGGER_H
#include "../build.h"
// [Dependencies]
#include "../base/containers.h"
#include <stdarg.h>
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
#if !defined(ASMJIT_DISABLE_LOGGER)
// ============================================================================
// [asmjit::LogUtil]
// ============================================================================
// Only used by asmjit internals, not available to consumers.
#if defined(ASMJIT_EXPORTS)
struct LogUtil {
enum {
// Has to be big to be able to hold all the metadata the compiler can assign
// to a single instruction.
kMaxCommentLength = 512,
kMaxInstLength = 40,
kMaxBinaryLength = 26
};
static bool formatLine(
StringBuilder& sb,
const uint8_t* binData, size_t binLen, size_t dispLen, size_t imLen, const char* comment) noexcept;
};
#endif // ASMJIT_EXPORTS
// ============================================================================
// [asmjit::Logger]
// ============================================================================
//! Abstract logging class.
//!
//! This class can be inherited and reimplemented to fit into your logging
//! subsystem. When reimplementing, override the `logString()` method to log
//! into a custom stream.
//!
//! This class also contains an `_options` member that can be used to control
//! how the output is formatted (see \ref Options).
class ASMJIT_VIRTAPI Logger {
public:
ASMJIT_NO_COPY(Logger)
// --------------------------------------------------------------------------
// [Options]
// --------------------------------------------------------------------------
//! Logger options.
ASMJIT_ENUM(Options) {
kOptionBinaryForm = 0x00000001, //! Output instructions also in binary form.
kOptionHexImmediate = 0x00000002, //! Output immediates as hexadecimal numbers.
kOptionHexDisplacement = 0x00000004 //! Output displacements as hexadecimal numbers.
};
// --------------------------------------------------------------------------
// [Style]
// --------------------------------------------------------------------------
//! Logger style.
ASMJIT_ENUM(Style) {
kStyleDefault = 0,
kStyleDirective = 1,
kStyleLabel = 2,
kStyleData = 3,
kStyleComment = 4,
kStyleCount = 5
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a `Logger` instance.
ASMJIT_API Logger() noexcept;
//! Destroy the `Logger` instance.
ASMJIT_API virtual ~Logger() noexcept;
// --------------------------------------------------------------------------
// [Logging]
// --------------------------------------------------------------------------
//! Log output.
virtual void logString(uint32_t style, const char* buf, size_t len = kInvalidIndex) noexcept = 0;
//! Log a formatted message (like sprintf), sending the output to the `logString()` method.
ASMJIT_API void logFormat(uint32_t style, const char* fmt, ...) noexcept;
//! Log binary data.
ASMJIT_API void logBinary(uint32_t style, const void* data, size_t size) noexcept;
// --------------------------------------------------------------------------
// [Options]
// --------------------------------------------------------------------------
//! Get all logger options as a single integer.
ASMJIT_INLINE uint32_t getOptions() const noexcept { return _options; }
//! Get the given logger option.
ASMJIT_INLINE bool hasOption(uint32_t option) const noexcept {
return (_options & option) != 0;
}
ASMJIT_INLINE void addOptions(uint32_t options) noexcept { _options |= options; }
ASMJIT_INLINE void clearOptions(uint32_t options) noexcept { _options &= ~options; }
// --------------------------------------------------------------------------
// [Indentation]
// --------------------------------------------------------------------------
//! Get indentation.
ASMJIT_INLINE const char* getIndentation() const noexcept {
return _indentation;
}
//! Set indentation.
ASMJIT_API void setIndentation(const char* indentation) noexcept;
//! Reset indentation.
ASMJIT_INLINE void resetIndentation() noexcept {
setIndentation(nullptr);
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Options, see \ref LoggerOption.
uint32_t _options;
//! Indentation.
char _indentation[12];
};
// ============================================================================
// [asmjit::FileLogger]
// ============================================================================
//! Logger that can log to standard C `FILE*` stream.
class ASMJIT_VIRTAPI FileLogger : public Logger {
public:
ASMJIT_NO_COPY(FileLogger)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new `FileLogger` that logs to a `FILE` stream.
ASMJIT_API FileLogger(FILE* stream = nullptr) noexcept;
//! Destroy the `FileLogger`.
ASMJIT_API virtual ~FileLogger() noexcept;
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get `FILE*` stream.
//!
//! NOTE: Return value can be `nullptr`.
ASMJIT_INLINE FILE* getStream() const noexcept {
return _stream;
}
//! Set the `FILE*` stream; it can be set to `nullptr` to disable logging,
//! although the `ExternalTool` will still call `logString()` even if there is
//! no stream.
ASMJIT_INLINE void setStream(FILE* stream) noexcept {
_stream = stream;
}
// --------------------------------------------------------------------------
// [Logging]
// --------------------------------------------------------------------------
ASMJIT_API virtual void logString(uint32_t style, const char* buf, size_t len = kInvalidIndex) noexcept;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! C file stream.
FILE* _stream;
};
// ============================================================================
// [asmjit::StringLogger]
// ============================================================================
//! String logger.
class ASMJIT_VIRTAPI StringLogger : public Logger {
public:
ASMJIT_NO_COPY(StringLogger)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create new `StringLogger`.
ASMJIT_API StringLogger() noexcept;
//! Destroy the `StringLogger`.
ASMJIT_API virtual ~StringLogger() noexcept;
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get `char*` pointer which represents the resulting string.
//!
//! The pointer is owned by `StringLogger`; it can't be modified or freed.
ASMJIT_INLINE const char* getString() const noexcept {
return _stringBuilder.getData();
}
//! Get the length of the string returned by `getString()`.
ASMJIT_INLINE size_t getLength() const noexcept {
return _stringBuilder.getLength();
}
//! Clear the resulting string.
ASMJIT_INLINE void clearString() noexcept {
_stringBuilder.clear();
}
// --------------------------------------------------------------------------
// [Logging]
// --------------------------------------------------------------------------
ASMJIT_API virtual void logString(uint32_t style, const char* buf, size_t len = kInvalidIndex) noexcept;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Output.
StringBuilder _stringBuilder;
};
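// Hedged usage sketch: a FileLogger writing to stderr with machine-code bytes
// enabled. Attaching the logger to a code generator (e.g. through a
// setLogger() call) is assumed from the rest of AsmJit and is not shown in
// this diff.
//
// asmjit::FileLogger logger(stderr);
// logger.addOptions(asmjit::Logger::kOptionBinaryForm);
// logger.logFormat(asmjit::Logger::kStyleComment, "; generated %d hooks\n", 3);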
#else
struct Logger;
#endif // !ASMJIT_DISABLE_LOGGER
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_LOGGER_H

View File

@ -0,0 +1,52 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/globals.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::Operand]
// ============================================================================
// Prevent static initialization.
class Operand {
public:
struct BaseOp {
uint8_t op;
uint8_t size;
uint8_t reserved_2_1;
uint8_t reserved_3_1;
uint32_t id;
uint32_t reserved_8_4;
uint32_t reserved_12_4;
};
// Kept in union to prevent LTO warnings.
union {
BaseOp _base;
// Required to properly align this _fake_ `Operand`, not used.
uint64_t _data[2];
};
};
ASMJIT_VARAPI const Operand noOperand;
const Operand noOperand = {{ 0, 0, 0, 0, kInvalidValue, 0, 0 }};
} // asmjit namespace
// [Api-End]
#include "../apiend.h"

File diff suppressed because it is too large

View File

@ -0,0 +1,132 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/podvector.h"
#include "../base/utils.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::PodVectorBase - NullData]
// ============================================================================
const PodVectorBase::Data PodVectorBase::_nullData = { 0, 0 };
static ASMJIT_INLINE bool isDataStatic(PodVectorBase* self, PodVectorBase::Data* d) noexcept {
return (void*)(self + 1) == (void*)d;
}
// ============================================================================
// [asmjit::PodVectorBase - Reset]
// ============================================================================
//! Clear vector data and free internal buffer.
void PodVectorBase::reset(bool releaseMemory) noexcept {
Data* d = _d;
if (d == &_nullData)
return;
if (releaseMemory && !isDataStatic(this, d)) {
ASMJIT_FREE(d);
_d = const_cast<Data*>(&_nullData);
return;
}
d->length = 0;
}
// ============================================================================
// [asmjit::PodVectorBase - Helpers]
// ============================================================================
Error PodVectorBase::_grow(size_t n, size_t sizeOfT) noexcept {
Data* d = _d;
size_t threshold = kMemAllocGrowMax / sizeOfT;
size_t capacity = d->capacity;
size_t after = d->length;
if (IntTraits<size_t>::maxValue() - n < after)
return kErrorNoHeapMemory;
after += n;
if (capacity >= after)
return kErrorOk;
// PodVector is used as a linear array for some data structures used by
// AsmJit code generation. The purpose of this aggressive growing scheme
// is to minimize memory reallocations, because AsmJit code generation
// classes are short-lived and will be freed or reused soon.
if (capacity < 32)
capacity = 32;
else if (capacity < 128)
capacity = 128;
else if (capacity < 512)
capacity = 512;
while (capacity < after) {
if (capacity < threshold)
capacity *= 2;
else
capacity += threshold;
}
return _reserve(capacity, sizeOfT);
}
Error PodVectorBase::_reserve(size_t n, size_t sizeOfT) noexcept {
Data* d = _d;
if (d->capacity >= n)
return kErrorOk;
size_t nBytes = sizeof(Data) + n * sizeOfT;
if (ASMJIT_UNLIKELY(nBytes < n))
return kErrorNoHeapMemory;
if (d == &_nullData) {
d = static_cast<Data*>(ASMJIT_ALLOC(nBytes));
if (ASMJIT_UNLIKELY(d == nullptr))
return kErrorNoHeapMemory;
d->length = 0;
}
else {
if (isDataStatic(this, d)) {
Data* oldD = d;
d = static_cast<Data*>(ASMJIT_ALLOC(nBytes));
if (ASMJIT_UNLIKELY(d == nullptr))
return kErrorNoHeapMemory;
size_t len = oldD->length;
d->length = len;
::memcpy(d->getData(), oldD->getData(), len * sizeOfT);
}
else {
d = static_cast<Data*>(ASMJIT_REALLOC(d, nBytes));
if (ASMJIT_UNLIKELY(d == nullptr))
return kErrorNoHeapMemory;
}
}
d->capacity = n;
_d = d;
return kErrorOk;
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"

View File

@ -0,0 +1,281 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_PODVECTOR_H
#define _ASMJIT_BASE_PODVECTOR_H
// [Dependencies]
#include "../base/globals.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::PodVectorBase]
// ============================================================================
//! \internal
class PodVectorBase {
public:
// --------------------------------------------------------------------------
// [Data]
// --------------------------------------------------------------------------
//! \internal
struct Data {
//! Get data.
ASMJIT_INLINE void* getData() const noexcept {
return static_cast<void*>(const_cast<Data*>(this + 1));
}
//! Capacity of the vector.
size_t capacity;
//! Length of the vector.
size_t length;
};
static ASMJIT_API const Data _nullData;
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new instance of `PodVectorBase`.
ASMJIT_INLINE PodVectorBase() noexcept : _d(const_cast<Data*>(&_nullData)) {}
//! Destroy the `PodVectorBase` and its data.
ASMJIT_INLINE ~PodVectorBase() noexcept { reset(true); }
protected:
explicit ASMJIT_INLINE PodVectorBase(Data* d) noexcept : _d(d) {}
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
public:
//! Reset the vector data and set its `length` to zero.
//!
//! If `releaseMemory` is true the vector buffer will be released to the
//! system.
ASMJIT_API void reset(bool releaseMemory = false) noexcept;
// --------------------------------------------------------------------------
// [Grow / Reserve]
// --------------------------------------------------------------------------
protected:
ASMJIT_API Error _grow(size_t n, size_t sizeOfT) noexcept;
ASMJIT_API Error _reserve(size_t n, size_t sizeOfT) noexcept;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
public:
Data* _d;
};
// ============================================================================
// [asmjit::PodVector<T>]
// ============================================================================
//! Template used to store and manage array of POD data.
//!
//! This template has these advantages over other vector<> templates:
//! - Non-copyable (designed to be non-copyable, we want it)
//! - No copy-on-write (some STL implementations use it)
//! - Optimized for working only with POD types
//! - Uses ASMJIT_... memory management macros
template <typename T>
class PodVector : public PodVectorBase {
public:
ASMJIT_NO_COPY(PodVector<T>)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new instance of `PodVector<T>`.
ASMJIT_INLINE PodVector() noexcept {}
//! Destroy the `PodVector<T>` and its data.
ASMJIT_INLINE ~PodVector() noexcept {}
protected:
explicit ASMJIT_INLINE PodVector(Data* d) noexcept : PodVectorBase(d) {}
// --------------------------------------------------------------------------
// [Data]
// --------------------------------------------------------------------------
public:
//! Get whether the vector is empty.
ASMJIT_INLINE bool isEmpty() const noexcept { return _d->length == 0; }
//! Get length.
ASMJIT_INLINE size_t getLength() const noexcept { return _d->length; }
//! Get capacity.
ASMJIT_INLINE size_t getCapacity() const noexcept { return _d->capacity; }
//! Get data.
ASMJIT_INLINE T* getData() noexcept { return static_cast<T*>(_d->getData()); }
//! \overload
ASMJIT_INLINE const T* getData() const noexcept { return static_cast<const T*>(_d->getData()); }
// --------------------------------------------------------------------------
// [Grow / Reserve]
// --------------------------------------------------------------------------
//! Called to grow the buffer to fit at least `n` elements more.
ASMJIT_INLINE Error _grow(size_t n) noexcept { return PodVectorBase::_grow(n, sizeof(T)); }
//! Realloc internal array to fit at least `n` items.
ASMJIT_INLINE Error _reserve(size_t n) noexcept { return PodVectorBase::_reserve(n, sizeof(T)); }
// --------------------------------------------------------------------------
// [Ops]
// --------------------------------------------------------------------------
//! Prepend `item` to vector.
Error prepend(const T& item) noexcept {
Data* d = _d;
if (d->length == d->capacity) {
ASMJIT_PROPAGATE_ERROR(_grow(1));
d = _d;
}
::memmove(static_cast<T*>(d->getData()) + 1, d->getData(), d->length * sizeof(T));
::memcpy(d->getData(), &item, sizeof(T));
d->length++;
return kErrorOk;
}
//! Insert an `item` at the `index`.
Error insert(size_t index, const T& item) noexcept {
Data* d = _d;
ASMJIT_ASSERT(index <= d->length);
if (d->length == d->capacity) {
ASMJIT_PROPAGATE_ERROR(_grow(1));
d = _d;
}
T* dst = static_cast<T*>(d->getData()) + index;
::memmove(dst + 1, dst, (d->length - index) * sizeof(T));
::memcpy(dst, &item, sizeof(T));
d->length++;
return kErrorOk;
}
//! Append `item` to vector.
Error append(const T& item) noexcept {
Data* d = _d;
if (d->length == d->capacity) {
ASMJIT_PROPAGATE_ERROR(_grow(1));
d = _d;
}
::memcpy(static_cast<T*>(d->getData()) + d->length, &item, sizeof(T));
d->length++;
return kErrorOk;
}
//! Get index of `val` or `kInvalidIndex` if not found.
size_t indexOf(const T& val) const noexcept {
Data* d = _d;
const T* data = static_cast<const T*>(d->getData());
size_t len = d->length;
for (size_t i = 0; i < len; i++)
if (data[i] == val)
return i;
return kInvalidIndex;
}
//! Remove item at index `i`.
void removeAt(size_t i) noexcept {
Data* d = _d;
ASMJIT_ASSERT(i < d->length);
T* data = static_cast<T*>(d->getData()) + i;
d->length--;
::memmove(data, data + 1, (d->length - i) * sizeof(T));
}
//! Swap this pod-vector with `other`.
void swap(PodVector<T>& other) noexcept {
Data* otherData = other._d;
other._d = _d;
_d = otherData;
}
//! Get item at index `i`.
ASMJIT_INLINE T& operator[](size_t i) noexcept {
ASMJIT_ASSERT(i < getLength());
return getData()[i];
}
//! Get item at index `i`.
ASMJIT_INLINE const T& operator[](size_t i) const noexcept {
ASMJIT_ASSERT(i < getLength());
return getData()[i];
}
};
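// Minimal usage sketch (illustrative values): the vector grows on demand and
// reports allocation failures through asmjit::Error instead of throwing.
//
// asmjit::PodVector<uint32_t> v;
// if (v.append(42) == asmjit::kErrorOk && v.append(7) == asmjit::kErrorOk) {
//   size_t pos = v.indexOf(7); // == 1
//   v.removeAt(0);             // v now holds { 7 }
// }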
// ============================================================================
// [asmjit::PodVectorTmp<T>]
// ============================================================================
template<typename T, size_t N>
class PodVectorTmp : public PodVector<T> {
public:
ASMJIT_NO_COPY(PodVectorTmp<T, N>)
// --------------------------------------------------------------------------
// [StaticData]
// --------------------------------------------------------------------------
struct StaticData : public PodVectorBase::Data {
char data[sizeof(T) * N];
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new instance of `PodVectorTmp<T>`.
ASMJIT_INLINE PodVectorTmp() noexcept : PodVector<T>(&_staticData) {
_staticData.capacity = N;
_staticData.length = 0;
}
//! Destroy the `PodVectorTmp<T>` and its data.
ASMJIT_INLINE ~PodVectorTmp() noexcept {}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
StaticData _staticData;
};
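// Illustrative note: PodVectorTmp<T, N> embeds storage for the first N items
// in _staticData, so small vectors never touch the heap.
//
// asmjit::PodVectorTmp<uint32_t, 8> small;
// small.append(1); // served from the embedded buffer, no ASMJIT_ALLOC yet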
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_PODVECTOR_H

View File

@ -0,0 +1,214 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/assembler.h"
#include "../base/runtime.h"
// TODO: Rename this, or make call conv independent of CompilerFunc.
#include "../base/compilerfunc.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::Runtime - Utilities]
// ============================================================================
static ASMJIT_INLINE uint32_t hostStackAlignment() noexcept {
// By default a pointer-size stack alignment is assumed.
uint32_t alignment = sizeof(intptr_t);
// ARM & ARM64
// -----------
//
// - 32-bit ARM requires stack to be aligned to 8 bytes.
// - 64-bit ARM requires stack to be aligned to 16 bytes.
#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
alignment = ASMJIT_ARCH_ARM32 ? 8 : 16;
#endif
// X86 & X64
// ---------
//
// - 32-bit X86 requires stack to be aligned to 4 bytes. Modern Linux, APPLE
//   and BSD guarantee 16-byte stack alignment even in 32-bit mode, but I'm
//   not sure about all other UNIX operating systems, because 16-byte alignment
//   is an addition to an older specification.
// - 64-bit X86 requires stack to be aligned to 16 bytes.
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
int modernOS = ASMJIT_OS_LINUX || // Linux & ANDROID.
ASMJIT_OS_MAC || // OSX and iOS.
ASMJIT_OS_BSD; // BSD variants.
alignment = ASMJIT_ARCH_X64 || modernOS ? 16 : 4;
#endif
return alignment;
}
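// Worked examples of the branches above (illustrative): 32-bit Windows x86
// yields 4, 32-bit Linux/Mac/BSD x86 yields 16, any x64 target yields 16,
// ARM32 yields 8 and ARM64 yields 16.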
static ASMJIT_INLINE void hostFlushInstructionCache(void* p, size_t size) noexcept {
// Only useful on non-x86 architectures.
#if !ASMJIT_ARCH_X86 && !ASMJIT_ARCH_X64
# if ASMJIT_OS_WINDOWS
// Windows has a built-in support in kernel32.dll.
::FlushInstructionCache(::GetCurrentProcess(), p, size);
# endif // ASMJIT_OS_WINDOWS
#else
ASMJIT_UNUSED(p);
ASMJIT_UNUSED(size);
#endif // !ASMJIT_ARCH_X86 && !ASMJIT_ARCH_X64
}
// ============================================================================
// [asmjit::Runtime - Construction / Destruction]
// ============================================================================
Runtime::Runtime() noexcept
: _runtimeType(kTypeNone),
_allocType(kVMemAllocFreeable),
_cpuInfo(),
_stackAlignment(0),
_cdeclConv(kCallConvNone),
_stdCallConv(kCallConvNone),
_baseAddress(kNoBaseAddress),
_sizeLimit(0) {
::memset(_reserved, 0, sizeof(_reserved));
}
Runtime::~Runtime() noexcept {}
// ============================================================================
// [asmjit::HostRuntime - Construction / Destruction]
// ============================================================================
HostRuntime::HostRuntime() noexcept {
_runtimeType = kTypeJit;
_cpuInfo = CpuInfo::getHost();
_stackAlignment = hostStackAlignment();
_cdeclConv = kCallConvHostCDecl;
_stdCallConv = kCallConvHostStdCall;
}
HostRuntime::~HostRuntime() noexcept {}
// ============================================================================
// [asmjit::HostRuntime - Interface]
// ============================================================================
void HostRuntime::flush(void* p, size_t size) noexcept {
hostFlushInstructionCache(p, size);
}
// ============================================================================
// [asmjit::StaticRuntime - Construction / Destruction]
// ============================================================================
StaticRuntime::StaticRuntime(void* baseAddress, size_t sizeLimit) noexcept {
_sizeLimit = sizeLimit;
_baseAddress = static_cast<Ptr>((uintptr_t)baseAddress);
}
StaticRuntime::~StaticRuntime() noexcept {}
// ============================================================================
// [asmjit::StaticRuntime - Interface]
// ============================================================================
Error StaticRuntime::add(void** dst, Assembler* assembler) noexcept {
size_t codeSize = assembler->getCodeSize();
size_t sizeLimit = _sizeLimit;
if (codeSize == 0) {
*dst = nullptr;
return kErrorNoCodeGenerated;
}
if (sizeLimit != 0 && sizeLimit < codeSize) {
*dst = nullptr;
return kErrorCodeTooLarge;
}
Ptr baseAddress = _baseAddress;
uint8_t* p = static_cast<uint8_t*>((void*)static_cast<uintptr_t>(baseAddress));
// Since the base address is known the `relocSize` returned should be equal
// to `codeSize`. It's better to fail if they don't match instead of passing
// silently.
size_t relocSize = assembler->relocCode(p, baseAddress);
if (relocSize == 0 || codeSize != relocSize) {
*dst = nullptr;
return kErrorInvalidState;
}
_baseAddress += codeSize;
if (sizeLimit)
sizeLimit -= codeSize;
flush(p, codeSize);
*dst = p;
return kErrorOk;
}
Error StaticRuntime::release(void* p) noexcept {
// There is nothing to release as `StaticRuntime` doesn't manage any memory.
ASMJIT_UNUSED(p);
return kErrorOk;
}
// ============================================================================
// [asmjit::JitRuntime - Construction / Destruction]
// ============================================================================
JitRuntime::JitRuntime() noexcept {}
JitRuntime::~JitRuntime() noexcept {}
// ============================================================================
// [asmjit::JitRuntime - Interface]
// ============================================================================
Error JitRuntime::add(void** dst, Assembler* assembler) noexcept {
size_t codeSize = assembler->getCodeSize();
if (codeSize == 0) {
*dst = nullptr;
return kErrorNoCodeGenerated;
}
void* p = _memMgr.alloc(codeSize, getAllocType());
if (p == nullptr) {
*dst = nullptr;
return kErrorNoVirtualMemory;
}
// Relocate the code and release the unused memory back to `VMemMgr`.
size_t relocSize = assembler->relocCode(p);
if (relocSize == 0) {
*dst = nullptr;
_memMgr.release(p);
return kErrorInvalidState;
}
if (relocSize < codeSize)
_memMgr.shrink(p, relocSize);
flush(p, relocSize);
*dst = p;
return kErrorOk;
}
Error JitRuntime::release(void* p) noexcept {
return _memMgr.release(p);
}
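// Hedged sketch of the add/release contract (Assembler setup elided; names
// are illustrative): the runtime owns the executable memory it hands out via
// its VMemMgr.
//
// asmjit::JitRuntime runtime;
// void* fn = nullptr;
// if (runtime.add(&fn, &assembler) == asmjit::kErrorOk) {
//   // ... call the generated code through `fn` ...
//   runtime.release(fn);
// }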
} // asmjit namespace
// [Api-End]
#include "../apiend.h"

View File

@ -0,0 +1,266 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_RUNTIME_H
#define _ASMJIT_BASE_RUNTIME_H
// [Dependencies]
#include "../base/cpuinfo.h"
#include "../base/vmem.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [Forward Declarations]
// ============================================================================
class Assembler;
class CpuInfo;
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::Runtime]
// ============================================================================
//! Base runtime.
class ASMJIT_VIRTAPI Runtime {
public:
ASMJIT_NO_COPY(Runtime)
// --------------------------------------------------------------------------
// [asmjit::RuntimeType]
// --------------------------------------------------------------------------
ASMJIT_ENUM(Type) {
kTypeNone = 0,
kTypeJit = 1,
kTypeRemote = 2
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a `Runtime` instance.
ASMJIT_API Runtime() noexcept;
//! Destroy the `Runtime` instance.
ASMJIT_API virtual ~Runtime() noexcept;
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get the runtime type, see \ref Type.
ASMJIT_INLINE uint32_t getRuntimeType() const noexcept { return _runtimeType; }
//! Get stack alignment of the target.
ASMJIT_INLINE uint32_t getStackAlignment() const noexcept { return _stackAlignment; }
//! Get the CDECL calling convention conforming to the runtime's ABI.
//!
//! NOTE: This is a default calling convention used by the runtime's target.
ASMJIT_INLINE uint32_t getCdeclConv() const noexcept { return _cdeclConv; }
//! Get the STDCALL calling convention conforming to the runtime's ABI.
//!
//! NOTE: STDCALL calling convention is only used by 32-bit x86 target. On
//! all other targets it's mapped to CDECL and calling `getStdcallConv()` will
//! return the same as `getCdeclConv()`.
ASMJIT_INLINE uint32_t getStdCallConv() const noexcept { return _stdCallConv; }
//! Get CPU information.
ASMJIT_INLINE const CpuInfo& getCpuInfo() const noexcept { return _cpuInfo; }
//! Set CPU information.
ASMJIT_INLINE void setCpuInfo(const CpuInfo& ci) noexcept { _cpuInfo = ci; }
//! Get whether the runtime has a base address.
ASMJIT_INLINE bool hasBaseAddress() const noexcept { return _baseAddress != kNoBaseAddress; }
//! Get the base address.
ASMJIT_INLINE Ptr getBaseAddress() const noexcept { return _baseAddress; }
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
//! Allocate the memory needed for the code generated by `assembler` and
//! relocate it to the target location.
//!
//! The beginning of the memory allocated for the function is returned in
//! `dst`. Returns a status code as \ref ErrorCode; on failure `dst` is set to
//! `nullptr`.
virtual Error add(void** dst, Assembler* assembler) noexcept = 0;
//! Release memory allocated by `add`.
virtual Error release(void* p) noexcept = 0;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Type of the runtime.
uint8_t _runtimeType;
//! Type of the allocation.
uint8_t _allocType;
//! Runtime's stack alignment.
uint8_t _stackAlignment;
//! CDECL calling convention conforming to runtime ABI.
uint8_t _cdeclConv;
//! STDCALL calling convention conforming to runtime ABI.
uint8_t _stdCallConv;
//! \internal
uint8_t _reserved[3];
//! Runtime CPU information.
CpuInfo _cpuInfo;
//! Base address (-1 means no base address).
Ptr _baseAddress;
//! Maximum size of the code that can be added to the runtime (0=unlimited).
size_t _sizeLimit;
};
// ============================================================================
// [asmjit::HostRuntime]
// ============================================================================
//! Base runtime for JIT code generation.
class ASMJIT_VIRTAPI HostRuntime : public Runtime {
public:
ASMJIT_NO_COPY(HostRuntime)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a `HostRuntime` instance.
ASMJIT_API HostRuntime() noexcept;
//! Destroy the `HostRuntime` instance.
ASMJIT_API virtual ~HostRuntime() noexcept;
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
//! Flush an instruction cache.
//!
//! This member function is called after the code has been copied to the
//! destination buffer. It is only useful for JIT code generation as it
//! causes a flush of the processor cache.
//!
//! Flushing is basically a NOP under X86/X64, but is needed by architectures
//! that do not have a transparent instruction cache.
//!
//! This function can also be overridden to improve compatibility with tools
//! such as Valgrind; however, it's not an official part of AsmJit.
ASMJIT_API virtual void flush(void* p, size_t size) noexcept;
};
// ============================================================================
// [asmjit::StaticRuntime]
// ============================================================================
//! JIT static runtime.
//!
//! JIT static runtime can be used to generate code to a memory location that
//! is known.
class ASMJIT_VIRTAPI StaticRuntime : public HostRuntime {
public:
ASMJIT_NO_COPY(StaticRuntime)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a `StaticRuntime` instance.
//!
//! The `address` specifies a fixed target address, which will be used as a
//! base address for relocation, and `sizeLimit` specifies the maximum size
//! of a code that can be copied to it. If there is no limit `sizeLimit`
//! should be zero.
ASMJIT_API StaticRuntime(void* baseAddress, size_t sizeLimit = 0) noexcept;
//! Destroy the `StaticRuntime` instance.
ASMJIT_API virtual ~StaticRuntime() noexcept;
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get the base address.
ASMJIT_INLINE Ptr getBaseAddress() const noexcept { return _baseAddress; }
//! Get the maximum size of the code that can be relocated/stored in the target.
//!
//! Returns zero if unlimited.
ASMJIT_INLINE size_t getSizeLimit() const noexcept { return _sizeLimit; }
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
ASMJIT_API virtual Error add(void** dst, Assembler* assembler) noexcept;
ASMJIT_API virtual Error release(void* p) noexcept;
};
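// Hedged usage sketch: relocating generated code to a caller-provided buffer
// at a known address (making the buffer executable and setting up the
// Assembler are elided; names are illustrative).
//
// asmjit::StaticRuntime runtime(targetBuffer, targetSize);
// void* fn = nullptr;
// asmjit::Error err = runtime.add(&fn, &assembler);
// // err == kErrorCodeTooLarge if the code doesn't fit into targetSize.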
// ============================================================================
// [asmjit::JitRuntime]
// ============================================================================
//! JIT runtime.
class ASMJIT_VIRTAPI JitRuntime : public HostRuntime {
public:
ASMJIT_NO_COPY(JitRuntime)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a `JitRuntime` instance.
ASMJIT_API JitRuntime() noexcept;
//! Destroy the `JitRuntime` instance.
ASMJIT_API virtual ~JitRuntime() noexcept;
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get the type of allocation.
ASMJIT_INLINE uint32_t getAllocType() const noexcept { return _allocType; }
//! Set the type of allocation.
ASMJIT_INLINE void setAllocType(uint32_t allocType) noexcept { _allocType = allocType; }
//! Get the virtual memory manager.
ASMJIT_INLINE VMemMgr* getMemMgr() const noexcept { return const_cast<VMemMgr*>(&_memMgr); }
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
ASMJIT_API virtual Error add(void** dst, Assembler* assembler) noexcept;
ASMJIT_API virtual Error release(void* p) noexcept;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Virtual memory manager.
VMemMgr _memMgr;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_RUNTIME_H

View File

@ -0,0 +1,289 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/utils.h"
#if ASMJIT_OS_POSIX
# include <time.h>
# include <unistd.h>
#endif // ASMJIT_OS_POSIX
#if ASMJIT_OS_MAC
# include <mach/mach_time.h>
#endif // ASMJIT_OS_MAC
#if ASMJIT_OS_WINDOWS
# if defined(_MSC_VER) && _MSC_VER >= 1400
# include <intrin.h>
# else
# define _InterlockedCompareExchange InterlockedCompareExchange
# endif // _MSC_VER
#endif // ASMJIT_OS_WINDOWS
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::CpuTicks - Windows]
// ============================================================================
#if ASMJIT_OS_WINDOWS
static volatile uint32_t Utils_hiResTicks;
static volatile double Utils_hiResFreq;
uint32_t Utils::getTickCount() noexcept {
do {
uint32_t hiResOk = Utils_hiResTicks;
if (hiResOk == 1) {
LARGE_INTEGER now;
if (!::QueryPerformanceCounter(&now))
break;
return (int64_t)(double(now.QuadPart) / Utils_hiResFreq);
}
if (hiResOk == 0) {
LARGE_INTEGER qpf;
if (!::QueryPerformanceFrequency(&qpf)) {
_InterlockedCompareExchange((LONG*)&Utils_hiResTicks, 0xFFFFFFFF, 0);
break;
}
LARGE_INTEGER now;
if (!::QueryPerformanceCounter(&now)) {
_InterlockedCompareExchange((LONG*)&Utils_hiResTicks, 0xFFFFFFFF, 0);
break;
}
double freqDouble = double(qpf.QuadPart) / 1000.0;
Utils_hiResFreq = freqDouble;
_InterlockedCompareExchange((LONG*)&Utils_hiResTicks, 1, 0);
return static_cast<uint32_t>(
static_cast<int64_t>(double(now.QuadPart) / freqDouble) & 0xFFFFFFFF);
}
} while (0);
// Bail to a less precise GetTickCount().
return ::GetTickCount();
}
// ============================================================================
// [asmjit::CpuTicks - Mac]
// ============================================================================
#elif ASMJIT_OS_MAC
static mach_timebase_info_data_t CpuTicks_machTime;
uint32_t Utils::getTickCount() noexcept {
// Initialize the first time CpuTicks::now() is called (See Apple's QA1398).
if (CpuTicks_machTime.denom == 0) {
if (mach_timebase_info(&CpuTicks_machTime) != KERN_SUCCESS)
return 0;
}
// mach_absolute_time() returns nanoseconds, we need just milliseconds.
uint64_t t = mach_absolute_time() / 1000000;
t = t * CpuTicks_machTime.numer / CpuTicks_machTime.denom;
return static_cast<uint32_t>(t & 0xFFFFFFFFU);
}
// ============================================================================
// [asmjit::CpuTicks - Posix]
// ============================================================================
#else
uint32_t Utils::getTickCount() noexcept {
#if defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0)
return 0;
uint64_t t = (uint64_t(ts.tv_sec ) * 1000) + (uint64_t(ts.tv_nsec) / 1000000);
return static_cast<uint32_t>(t & 0xFFFFFFFFU);
#else // _POSIX_MONOTONIC_CLOCK
#error "[asmjit] Utils::getTickCount() is not implemented for your target OS."
return 0;
#endif // _POSIX_MONOTONIC_CLOCK
}
#endif // ASMJIT_OS
// ============================================================================
// [asmjit::Utils - Unit]
// ============================================================================
#if defined(ASMJIT_TEST)
UNIT(base_utils) {
uint32_t i;
INFO("IntTraits<>.");
EXPECT(IntTraits<signed char>::kIsSigned,"IntTraits<signed char> should report signed.");
EXPECT(IntTraits<short>::kIsSigned, "IntTraits<signed short> should report signed.");
EXPECT(IntTraits<int>::kIsSigned, "IntTraits<int> should report signed.");
EXPECT(IntTraits<long>::kIsSigned, "IntTraits<long> should report signed.");
EXPECT(IntTraits<unsigned char>::kIsUnsigned, "IntTraits<unsigned char> should report unsigned.");
EXPECT(IntTraits<unsigned short>::kIsUnsigned, "IntTraits<unsigned short> should report unsigned.");
EXPECT(IntTraits<unsigned int>::kIsUnsigned, "IntTraits<unsigned int> should report unsigned.");
EXPECT(IntTraits<unsigned long>::kIsUnsigned, "IntTraits<unsigned long> should report unsigned.");
EXPECT(IntTraits<intptr_t>::kIsSigned, "IntTraits<intptr_t> should report signed.");
EXPECT(IntTraits<uintptr_t>::kIsUnsigned, "IntTraits<uintptr_t> should report unsigned.");
EXPECT(IntTraits<intptr_t>::kIsIntPtr, "IntTraits<intptr_t> should report intptr_t type.");
EXPECT(IntTraits<uintptr_t>::kIsIntPtr, "IntTraits<uintptr_t> should report intptr_t type.");
INFO("Utils::iMin()/iMax().");
EXPECT(Utils::iMin<int>( 0, -1) == -1, "Utils::iMin<int> should return a minimum value.");
EXPECT(Utils::iMin<int>(-1, -2) == -2, "Utils::iMin<int> should return a minimum value.");
EXPECT(Utils::iMin<int>( 1, 2) == 1, "Utils::iMin<int> should return a minimum value.");
EXPECT(Utils::iMax<int>( 0, -1) == 0, "Utils::iMax<int> should return a maximum value.");
EXPECT(Utils::iMax<int>(-1, -2) == -1, "Utils::iMax<int> should return a maximum value.");
EXPECT(Utils::iMax<int>( 1, 2) == 2, "Utils::iMax<int> should return a maximum value.");
INFO("Utils::inInterval().");
EXPECT(Utils::inInterval<int>(11 , 10, 20) == true , "Utils::inInterval<int> should return true if inside.");
EXPECT(Utils::inInterval<int>(101, 10, 20) == false, "Utils::inInterval<int> should return false if outside.");
INFO("Utils::isInt8().");
EXPECT(Utils::isInt8(-128) == true , "Utils::isInt8<> should return true if inside.");
EXPECT(Utils::isInt8( 127) == true , "Utils::isInt8<> should return true if inside.");
EXPECT(Utils::isInt8(-129) == false, "Utils::isInt8<> should return false if outside.");
EXPECT(Utils::isInt8( 128) == false, "Utils::isInt8<> should return false if outside.");
INFO("Utils::isInt16().");
EXPECT(Utils::isInt16(-32768) == true , "Utils::isInt16<> should return true if inside.");
EXPECT(Utils::isInt16( 32767) == true , "Utils::isInt16<> should return true if inside.");
EXPECT(Utils::isInt16(-32769) == false, "Utils::isInt16<> should return false if outside.");
EXPECT(Utils::isInt16( 32768) == false, "Utils::isInt16<> should return false if outside.");
INFO("Utils::isInt32().");
EXPECT(Utils::isInt32( 2147483647 ) == true, "Utils::isInt32<int> should return true if inside.");
EXPECT(Utils::isInt32(-2147483647 - 1) == true, "Utils::isInt32<int> should return true if inside.");
EXPECT(Utils::isInt32(ASMJIT_UINT64_C(2147483648)) == false, "Utils::isInt32<int> should return false if outside.");
EXPECT(Utils::isInt32(ASMJIT_UINT64_C(0xFFFFFFFF)) == false, "Utils::isInt32<int> should return false if outside.");
EXPECT(Utils::isInt32(ASMJIT_UINT64_C(0xFFFFFFFF) + 1) == false, "Utils::isInt32<int> should return false if outside.");
INFO("Utils::isUInt8().");
EXPECT(Utils::isUInt8(0) == true , "Utils::isUInt8<> should return true if inside.");
EXPECT(Utils::isUInt8(255) == true , "Utils::isUInt8<> should return true if inside.");
EXPECT(Utils::isUInt8(256) == false, "Utils::isUInt8<> should return false if outside.");
EXPECT(Utils::isUInt8(-1) == false, "Utils::isUInt8<> should return false if negative.");
INFO("Utils::isUInt12().");
EXPECT(Utils::isUInt12(0) == true , "Utils::isUInt12<> should return true if inside.");
EXPECT(Utils::isUInt12(4095) == true , "Utils::isUInt12<> should return true if inside.");
EXPECT(Utils::isUInt12(4096) == false, "Utils::isUInt12<> should return false if outside.");
EXPECT(Utils::isUInt12(-1) == false, "Utils::isUInt12<> should return false if negative.");
INFO("Utils::isUInt16().");
EXPECT(Utils::isUInt16(0) == true , "Utils::isUInt16<> should return true if inside.");
EXPECT(Utils::isUInt16(65535) == true , "Utils::isUInt16<> should return true if inside.");
EXPECT(Utils::isUInt16(65536) == false, "Utils::isUInt16<> should return false if outside.");
EXPECT(Utils::isUInt16(-1) == false, "Utils::isUInt16<> should return false if negative.");
INFO("Utils::isUInt32().");
EXPECT(Utils::isUInt32(ASMJIT_UINT64_C(0xFFFFFFFF)) == true, "Utils::isUInt32<uint64_t> should return true if inside.");
EXPECT(Utils::isUInt32(ASMJIT_UINT64_C(0xFFFFFFFF) + 1) == false, "Utils::isUInt32<uint64_t> should return false if outside.");
EXPECT(Utils::isUInt32(-1) == false, "Utils::isUInt32<int> should return false if negative.");
INFO("Utils::isPower2().");
for (i = 0; i < 64; i++) {
EXPECT(Utils::isPowerOf2(static_cast<uint64_t>(1) << i) == true,
"Utils::isPower2() didn't report power of 2.");
EXPECT(Utils::isPowerOf2((static_cast<uint64_t>(1) << i) ^ 0x001101) == false,
"Utils::isPower2() didn't report not power of 2.");
}
INFO("Utils::mask().");
for (i = 0; i < 32; i++) {
EXPECT(Utils::mask(i) == (1 << i),
"Utils::mask(%u) should return %X.", i, (1 << i));
}
INFO("Utils::bits().");
for (i = 0; i < 32; i++) {
uint32_t expectedBits = 0;
for (uint32_t b = 0; b < i; b++)
expectedBits |= static_cast<uint32_t>(1) << b;
EXPECT(Utils::bits(i) == expectedBits,
"Utils::bits(%u) should return %X.", i, expectedBits);
}
INFO("Utils::hasBit().");
for (i = 0; i < 32; i++) {
EXPECT(Utils::hasBit((1 << i), i) == true,
"Utils::hasBit(%X, %u) should return true.", (1 << i), i);
}
INFO("Utils::bitCount().");
for (i = 0; i < 32; i++) {
EXPECT(Utils::bitCount((1 << i)) == 1,
"Utils::bitCount(%X) should return true.", (1 << i));
}
EXPECT(Utils::bitCount(0x000000F0) == 4, "");
EXPECT(Utils::bitCount(0x10101010) == 4, "");
EXPECT(Utils::bitCount(0xFF000000) == 8, "");
EXPECT(Utils::bitCount(0xFFFFFFF7) == 31, "");
EXPECT(Utils::bitCount(0x7FFFFFFF) == 31, "");
INFO("Utils::findFirstBit().");
for (i = 0; i < 32; i++) {
EXPECT(Utils::findFirstBit((1 << i)) == i,
"Utils::findFirstBit(%X) should return %u.", (1 << i), i);
}
INFO("Utils::keepNOnesFromRight().");
EXPECT(Utils::keepNOnesFromRight(0xF, 1) == 0x1, "");
EXPECT(Utils::keepNOnesFromRight(0xF, 2) == 0x3, "");
EXPECT(Utils::keepNOnesFromRight(0xF, 3) == 0x7, "");
EXPECT(Utils::keepNOnesFromRight(0x5, 2) == 0x5, "");
EXPECT(Utils::keepNOnesFromRight(0xD, 2) == 0x5, "");
INFO("Utils::isAligned().");
EXPECT(Utils::isAligned<size_t>(0xFFFF, 4) == false, "");
EXPECT(Utils::isAligned<size_t>(0xFFF4, 4) == true , "");
EXPECT(Utils::isAligned<size_t>(0xFFF8, 8) == true , "");
EXPECT(Utils::isAligned<size_t>(0xFFF0, 16) == true , "");
INFO("Utils::alignTo().");
EXPECT(Utils::alignTo<size_t>(0xFFFF, 4) == 0x10000, "");
EXPECT(Utils::alignTo<size_t>(0xFFF4, 4) == 0x0FFF4, "");
EXPECT(Utils::alignTo<size_t>(0xFFF8, 8) == 0x0FFF8, "");
EXPECT(Utils::alignTo<size_t>(0xFFF0, 16) == 0x0FFF0, "");
EXPECT(Utils::alignTo<size_t>(0xFFF0, 32) == 0x10000, "");
INFO("Utils::alignToPowerOf2().");
EXPECT(Utils::alignToPowerOf2<size_t>(0xFFFF) == 0x10000, "");
EXPECT(Utils::alignToPowerOf2<size_t>(0xF123) == 0x10000, "");
EXPECT(Utils::alignToPowerOf2<size_t>(0x0F00) == 0x01000, "");
EXPECT(Utils::alignToPowerOf2<size_t>(0x0100) == 0x00100, "");
EXPECT(Utils::alignToPowerOf2<size_t>(0x1001) == 0x02000, "");
INFO("Utils::alignDiff().");
EXPECT(Utils::alignDiff<size_t>(0xFFFF, 4) == 1, "");
EXPECT(Utils::alignDiff<size_t>(0xFFF4, 4) == 0, "");
EXPECT(Utils::alignDiff<size_t>(0xFFF8, 8) == 0, "");
EXPECT(Utils::alignDiff<size_t>(0xFFF0, 16) == 0, "");
EXPECT(Utils::alignDiff<size_t>(0xFFF0, 32) == 16, "");
}
#endif // ASMJIT_TEST
} // asmjit namespace
// [Api-End]
#include "../apiend.h"

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,233 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_VMEM_H
#define _ASMJIT_BASE_VMEM_H
// [Dependencies]
#include "../base/utils.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::VMemAllocType]
// ============================================================================
//! Type of virtual memory allocation, see `VMemMgr::alloc()`.
ASMJIT_ENUM(VMemAllocType) {
//! Normal memory allocation, has to be freed by `VMemMgr::release()`.
kVMemAllocFreeable = 0,
//! Allocate permanent memory, can't be freed.
kVMemAllocPermanent = 1
};
// ============================================================================
// [asmjit::VMemFlags]
// ============================================================================
//! Type of virtual memory allocation, see `VMemMgr::alloc()`.
ASMJIT_ENUM(VMemFlags) {
//! Memory is writable.
kVMemFlagWritable = 0x00000001,
//! Memory is executable.
kVMemFlagExecutable = 0x00000002
};
// ============================================================================
// [asmjit::VMemUtil]
// ============================================================================
//! Virtual memory utilities.
//!
//! Defines functions that provide a facility to allocate and free memory that
//! is executable, in a platform-independent manner. If both the processor and
//! the host operating system support data-execution-prevention, the only way
//! to run machine code is to allocate it in memory that has been marked as
//! executable. VMemUtil is just a unified interface to the platform-dependent
//! APIs.
//!
//! `VirtualAlloc()` function is used on Windows operating system and `mmap()`
//! on POSIX. `VirtualAlloc()` and `mmap()` documentation provide a detailed
//! overview on how to use a platform specific APIs.
struct VMemUtil {
//! Get a size/alignment of a single virtual memory page.
static ASMJIT_API size_t getPageSize() noexcept;
//! Get a recommended granularity for a single `alloc` call.
static ASMJIT_API size_t getPageGranularity() noexcept;
//! Allocate virtual memory.
//!
//! Pages are readable/writable, but they are not guaranteed to be
//! executable unless `flags` contains `kVMemFlagExecutable`. Returns the
//! address of the allocated memory, or `nullptr` on failure.
static ASMJIT_API void* alloc(size_t length, size_t* allocated, uint32_t flags) noexcept;
//! Free memory allocated by `alloc()`.
static ASMJIT_API Error release(void* addr, size_t length) noexcept;
#if ASMJIT_OS_WINDOWS
//! Allocate virtual memory of `hProcess` (Windows only).
static ASMJIT_API void* allocProcessMemory(HANDLE hProcess, size_t length, size_t* allocated, uint32_t flags) noexcept;
//! Release virtual memory of `hProcess` (Windows only).
static ASMJIT_API Error releaseProcessMemory(HANDLE hProcess, void* addr, size_t length) noexcept;
#endif // ASMJIT_OS_WINDOWS
};
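// Hedged usage sketch: grab one writable and executable region directly from
// the OS and hand it back again.
//
// size_t allocated = 0;
// void* p = asmjit::VMemUtil::alloc(asmjit::VMemUtil::getPageSize(), &allocated,
//   asmjit::kVMemFlagWritable | asmjit::kVMemFlagExecutable);
// if (p != nullptr)
//   asmjit::VMemUtil::release(p, allocated);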
// ============================================================================
// [asmjit::VMemMgr]
// ============================================================================
//! Reference implementation of memory manager that uses `VMemUtil` to allocate
//! chunks of virtual memory and bit arrays to manage it.
class VMemMgr {
public:
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
#if !ASMJIT_OS_WINDOWS
//! Create a `VMemMgr` instance.
ASMJIT_API VMemMgr() noexcept;
#else
//! Create a `VMemMgr` instance.
//!
//! NOTE: When running on Windows it's possible to specify a `hProcess` to
//! be used for memory allocation. Using `hProcess` allows allocating memory
//! in a remote process.
ASMJIT_API VMemMgr(HANDLE hProcess = static_cast<HANDLE>(0)) noexcept;
#endif // ASMJIT_OS_WINDOWS
//! Destroy the `VMemMgr` instance and free all blocks.
ASMJIT_API ~VMemMgr() noexcept;
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
//! Free all allocated memory.
ASMJIT_API void reset() noexcept;
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
#if ASMJIT_OS_WINDOWS
//! Get the handle of the process memory manager is bound to.
ASMJIT_INLINE HANDLE getProcessHandle() const noexcept {
return _hProcess;
}
#endif // ASMJIT_OS_WINDOWS
//! Get how many bytes are currently allocated.
ASMJIT_INLINE size_t getAllocatedBytes() const noexcept {
return _allocatedBytes;
}
//! Get how many bytes are currently used.
ASMJIT_INLINE size_t getUsedBytes() const noexcept {
return _usedBytes;
}
//! Get whether to keep allocated memory after the `VMemMgr` is destroyed.
//!
//! \sa \ref setKeepVirtualMemory.
ASMJIT_INLINE bool getKeepVirtualMemory() const noexcept {
return _keepVirtualMemory;
}
//! Set whether to keep allocated memory after the memory manager is
//! destroyed.
//!
//! This is useful when patching code of a remote process: you allocate
//! memory in that process, store the generated code into it and patch the
//! function you want to redirect (to point into your code). The setting only
//! affects the VMemMgr destructor; after destruction all internal structures
//! are freed and only the process virtual memory remains.
//!
//! NOTE: Memory allocated with kVMemAllocPermanent is always kept.
//!
//! \sa \ref getKeepVirtualMemory.
ASMJIT_INLINE void setKeepVirtualMemory(bool keepVirtualMemory) noexcept {
_keepVirtualMemory = keepVirtualMemory;
}
// --------------------------------------------------------------------------
// [Alloc / Release]
// --------------------------------------------------------------------------
//! Allocate `size` bytes of virtual memory.
//!
//! Note that if you are implementing your own virtual memory manager you can
//! quietly ignore the type of allocation. It is mainly a hint from AsmJit to
//! the memory manager that the allocated memory will never be freed.
ASMJIT_API void* alloc(size_t size, uint32_t type = kVMemAllocFreeable) noexcept;
//! Free previously allocated memory at a given `address`.
ASMJIT_API Error release(void* p) noexcept;
//! Shrink the allocation at `p` to `used` bytes, freeing the unused tail.
ASMJIT_API Error shrink(void* p, size_t used) noexcept;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
#if ASMJIT_OS_WINDOWS
//! Process passed to `VirtualAllocEx` and `VirtualFree`.
HANDLE _hProcess;
#endif // ASMJIT_OS_WINDOWS
//! Lock to enable thread-safe functionality.
Lock _lock;
//! Default block size.
size_t _blockSize;
//! Default block density.
size_t _blockDensity;
//! Whether to keep virtual memory after destruction.
bool _keepVirtualMemory;
//! How many bytes are currently allocated.
size_t _allocatedBytes;
//! How many bytes are currently used.
size_t _usedBytes;
//! \internal
//! \{
struct RbNode;
struct MemNode;
struct PermanentNode;
// Memory nodes root.
MemNode* _root;
// Memory nodes list.
MemNode* _first;
MemNode* _last;
MemNode* _optimal;
// Permanent memory.
PermanentNode* _permanent;
//! \}
};
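// A minimal usage sketch (illustrative only): allocate a freeable block from
// a VMemMgr instance and release it again; the size is arbitrary.
//
// ~~~
// using namespace asmjit;
//
// VMemMgr memMgr;
// void* p = memMgr.alloc(256);
// if (p != nullptr) {
//   // ... emit code or store data into `p` ...
//   memMgr.release(p);
// }
// ~~~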
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_VMEM_H

View File

@ -0,0 +1,193 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/utils.h"
#include "../base/zone.h"
#include <stdarg.h>
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! Zero size block used by `Zone` that doesn't have any memory allocated.
static const Zone::Block Zone_zeroBlock = {
nullptr, nullptr, nullptr, nullptr, { 0 }
};
// ============================================================================
// [asmjit::Zone - Construction / Destruction]
// ============================================================================
Zone::Zone(size_t blockSize) noexcept {
_block = const_cast<Zone::Block*>(&Zone_zeroBlock);
_blockSize = blockSize;
}
Zone::~Zone() noexcept {
reset(true);
}
// ============================================================================
// [asmjit::Zone - Reset]
// ============================================================================
void Zone::reset(bool releaseMemory) noexcept {
Block* cur = _block;
// The static zero block can't be altered; there is nothing to reset.
if (cur == &Zone_zeroBlock)
return;
if (releaseMemory) {
// Since cur can be in the middle of the double-linked list, we have to
// traverse in both directions, `prev` and `next`, separately.
Block* next = cur->next;
do {
Block* prev = cur->prev;
ASMJIT_FREE(cur);
cur = prev;
} while (cur != nullptr);
cur = next;
while (cur != nullptr) {
next = cur->next;
ASMJIT_FREE(cur);
cur = next;
}
_block = const_cast<Zone::Block*>(&Zone_zeroBlock);
}
else {
while (cur->prev != nullptr)
cur = cur->prev;
cur->pos = cur->data;
_block = cur;
}
}
// ============================================================================
// [asmjit::Zone - Alloc]
// ============================================================================
void* Zone::_alloc(size_t size) noexcept {
Block* curBlock = _block;
size_t blockSize = Utils::iMax<size_t>(_blockSize, size);
// The `_alloc()` method can only be called if there is not enough space
// in the current block, see `alloc()` implementation for more details.
ASMJIT_ASSERT(curBlock == &Zone_zeroBlock || curBlock->getRemainingSize() < size);
// If the `Zone` has been reset the current block doesn't have to be the
// last one. Check if there is a block that can be reused instead of
// allocating a new one. If there is a `next` block it is completely unused,
// so we don't have to check its remaining size.
Block* next = curBlock->next;
if (next != nullptr && next->getBlockSize() >= size) {
next->pos = next->data + size;
_block = next;
return static_cast<void*>(next->data);
}
// Prevent arithmetic overflow.
if (blockSize > ~static_cast<size_t>(0) - sizeof(Block))
return nullptr;
Block* newBlock = static_cast<Block*>(ASMJIT_ALLOC(sizeof(Block) - sizeof(void*) + blockSize));
if (newBlock == nullptr)
return nullptr;
newBlock->pos = newBlock->data + size;
newBlock->end = newBlock->data + blockSize;
newBlock->prev = nullptr;
newBlock->next = nullptr;
if (curBlock != &Zone_zeroBlock) {
newBlock->prev = curBlock;
curBlock->next = newBlock;
// This only happens if there is a next block, but the requested memory
// can't fit into it. In that case a new block is allocated and inserted
// between the current block and the next one.
if (next != nullptr) {
newBlock->next = next;
next->prev = newBlock;
}
}
_block = newBlock;
return static_cast<void*>(newBlock->data);
}
void* Zone::allocZeroed(size_t size) noexcept {
void* p = alloc(size);
if (p != nullptr)
::memset(p, 0, size);
return p;
}
void* Zone::dup(const void* data, size_t size) noexcept {
if (data == nullptr)
return nullptr;
if (size == 0)
return nullptr;
void* m = alloc(size);
if (m == nullptr)
return nullptr;
::memcpy(m, data, size);
return m;
}
char* Zone::sdup(const char* str) noexcept {
if (str == nullptr)
return nullptr;
size_t len = ::strlen(str);
if (len == 0)
return nullptr;
// Include NULL terminator and limit string length.
if (++len > 256)
len = 256;
char* m = static_cast<char*>(alloc(len));
if (m == nullptr)
return nullptr;
::memcpy(m, str, len);
m[len - 1] = '\0';
return m;
}
char* Zone::sformat(const char* fmt, ...) noexcept {
if (fmt == nullptr)
return nullptr;
char buf[512];
size_t len;
va_list ap;
va_start(ap, fmt);
len = vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf) - 1, fmt, ap);
buf[len++] = 0;
va_end(ap);
return static_cast<char*>(dup(buf, len));
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"

View File

@ -0,0 +1,220 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_ZONE_H
#define _ASMJIT_BASE_ZONE_H
// [Dependencies]
#include "../base/globals.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::Zone]
// ============================================================================
//! Zone memory allocator.
//!
//! Zone is an incremental memory allocator that allocates memory by simply
//! incrementing a pointer. It allocates blocks of memory by using the standard
//! C library `malloc/free`, but divides these blocks into smaller segments
//! requested by calling `Zone::alloc()` and friends.
//!
//! Zone memory allocators are designed for data with a short lifetime. The
//! data used by `Assembler` and `Compiler` has a very short lifetime and is
//! therefore allocated by `Zone`. The advantage is that `Zone` can free all of
//! the allocated data at once by calling `reset()` or through the `Zone`
//! destructor.
class Zone {
public:
//! \internal
//!
//! A single block of memory.
struct Block {
// ------------------------------------------------------------------------
// [Accessors]
// ------------------------------------------------------------------------
//! Get the size of the block.
ASMJIT_INLINE size_t getBlockSize() const noexcept {
return (size_t)(end - data);
}
//! Get count of remaining bytes in the block.
ASMJIT_INLINE size_t getRemainingSize() const noexcept {
return (size_t)(end - pos);
}
// ------------------------------------------------------------------------
// [Members]
// ------------------------------------------------------------------------
//! Current data pointer (pointer to the first available byte).
uint8_t* pos;
//! End data pointer (pointer to the first invalid byte).
uint8_t* end;
//! Link to the previous block.
Block* prev;
//! Link to the next block.
Block* next;
//! Data.
uint8_t data[sizeof(void*)];
};
enum {
//! Zone allocator overhead.
kZoneOverhead =
kMemAllocOverhead
+ static_cast<int>(sizeof(Block) - sizeof(void*))
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new instance of `Zone` allocator.
//!
//! The `blockSize` parameter describes the default size of the block. If the
//! `size` parameter passed to `alloc()` is greater than the default size
//! `Zone` will allocate and use a larger block, but it will not change the
//! default `blockSize`.
//!
//! It's not required, but it's good practice to set `blockSize` to a
//! reasonable value that depends on the usage of `Zone`. Greater block sizes
//! are generally safer and perform better than unreasonably small values.
ASMJIT_API Zone(size_t blockSize) noexcept;
//! Destroy the `Zone` instance.
//!
//! This will destroy the `Zone` instance and release all blocks of memory
//! allocated by it. It performs implicit `reset(true)`.
ASMJIT_API ~Zone() noexcept;
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
//! Reset the `Zone`, invalidating all allocated blocks.
//!
//! If `releaseMemory` is true all buffers will be released to the system.
ASMJIT_API void reset(bool releaseMemory = false) noexcept;
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get the default block size.
ASMJIT_INLINE size_t getBlockSize() const noexcept {
return _blockSize;
}
// --------------------------------------------------------------------------
// [Alloc]
// --------------------------------------------------------------------------
//! Allocate `size` bytes of memory.
//!
//! The returned pointer is valid until the `Zone` instance is destroyed or
//! reset by calling `reset()`. If you plan to construct a C++ object at the
//! returned address, use placement `new` and an explicit destructor call:
//!
//! ~~~
//! using namespace asmjit;
//!
//! class Object { ... };
//!
//! // Create Zone with default block size of approximately 65536 bytes.
//! Zone zone(65536 - Zone::kZoneOverhead);
//!
//! // Allocate your objects from the zone, for example:
//! Object* obj = static_cast<Object*>( zone.alloc(sizeof(Object)) );
//!
//! if (obj == nullptr) {
//! // Handle out of memory error.
//! }
//!
//! // Placement `new` and `delete` operators can be used to instantiate it.
//! new(obj) Object();
//!
//! // ... lifetime of your objects ...
//!
//! // To destroy the instance (if required).
//! obj->~Object();
//!
//! // Reset or destroy `Zone`.
//! zone.reset();
//! ~~~
ASMJIT_INLINE void* alloc(size_t size) noexcept {
Block* cur = _block;
uint8_t* ptr = cur->pos;
size_t remainingBytes = (size_t)(cur->end - ptr);
if (remainingBytes < size)
return _alloc(size);
cur->pos += size;
ASMJIT_ASSERT(cur->pos <= cur->end);
return (void*)ptr;
}
//! Allocate `size` bytes of zeroed memory.
//!
//! See \ref alloc() for more details.
ASMJIT_API void* allocZeroed(size_t size) noexcept;
//! Like `alloc()`, but the return pointer is casted to `T*`.
template<typename T>
ASMJIT_INLINE T* allocT(size_t size = sizeof(T)) noexcept {
return static_cast<T*>(alloc(size));
}
//! Like `allocZeroed()`, but the return pointer is casted to `T*`.
template<typename T>
ASMJIT_INLINE T* allocZeroedT(size_t size = sizeof(T)) noexcept {
return static_cast<T*>(allocZeroed(size));
}
//! \internal
ASMJIT_API void* _alloc(size_t size) noexcept;
//! Helper to duplicate data.
ASMJIT_API void* dup(const void* data, size_t size) noexcept;
//! Helper to duplicate string.
ASMJIT_API char* sdup(const char* str) noexcept;
//! Helper to duplicate a formatted string (the formatted result is truncated
//! to fit an internal 512-byte buffer).
ASMJIT_API char* sformat(const char* fmt, ...) noexcept;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! The current block.
Block* _block;
//! Default block size.
size_t _blockSize;
};
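// A minimal sketch of the typed helpers above; `Point` is a hypothetical POD
// type used only for illustration.
//
// ~~~
// using namespace asmjit;
//
// struct Point { int x, y; };
//
// Zone zone(8192 - Zone::kZoneOverhead);
// Point* p = zone.allocZeroedT<Point>();
// if (p != nullptr) {
//   p->x = 1;
//   p->y = 2;
// }
// zone.reset(true); // Releases `p` together with all other allocations.
// ~~~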
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_ZONE_H

928
DynamicHooks/thirdparty/AsmJit/build.h vendored Normal file
View File

@ -0,0 +1,928 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BUILD_H
#define _ASMJIT_BUILD_H
// ============================================================================
// [asmjit::Build - Configuration]
// ============================================================================
// AsmJit is by default compiled only for the host processor for the purpose of
// JIT code generation. Both the Assembler and Compiler code generators are
// compiled by default. Preprocessor macros can be used to change the default
// behavior.
// External Config File
// --------------------
//
// Define in case your configuration is generated in an external file to be
// included.
#if defined(ASMJIT_CONFIG_FILE)
# include ASMJIT_CONFIG_FILE
#endif // ASMJIT_CONFIG_FILE
// AsmJit Static Builds and Embedding
// ----------------------------------
//
// These definitions can be used to enable a static library build. Embed is
// used when AsmJit's source code is embedded directly in another project;
// it implies a static build as well.
//
// #define ASMJIT_EMBED // Asmjit is embedded (implies ASMJIT_STATIC).
// #define ASMJIT_STATIC // Define to enable static-library build.
// AsmJit Build Modes
// ------------------
//
// These definitions control the build mode and tracing support. The build mode
// should be auto-detected at compile time, but it's possible to override it in
// case that the auto-detection fails.
//
// Tracing is a feature that is never compiled by default and it's only used to
// debug AsmJit itself.
//
// #define ASMJIT_DEBUG // Define to enable debug-mode.
// #define ASMJIT_RELEASE // Define to enable release-mode.
// #define ASMJIT_TRACE // Define to enable tracing.
// AsmJit Build Backends
// ---------------------
//
// These definitions control which backends to compile. If none of these is
// defined AsmJit will use host architecture by default (for JIT code generation).
//
// #define ASMJIT_BUILD_X86 // Define to enable x86 instruction set (32-bit).
// #define ASMJIT_BUILD_X64 // Define to enable x64 instruction set (64-bit).
// #define ASMJIT_BUILD_HOST // Define to enable host instruction set.
// AsmJit Build Features
// ---------------------
//
// Flags can be defined to disable standard features. These are handy especially
// when building AsmJit statically and some features are not needed or wanted
// (like the Compiler).
//
// AsmJit features are enabled by default.
// #define ASMJIT_DISABLE_COMPILER // Disable Compiler (completely).
// #define ASMJIT_DISABLE_LOGGER // Disable Logger (completely).
// #define ASMJIT_DISABLE_TEXT // Disable everything that contains text
// // representation (instructions, errors, ...).
// Prevent compile-time errors caused by misconfiguration.
#if defined(ASMJIT_DISABLE_TEXT) && !defined(ASMJIT_DISABLE_LOGGER)
# error "[asmjit] ASMJIT_DISABLE_TEXT requires ASMJIT_DISABLE_LOGGER to be defined."
#endif // ASMJIT_DISABLE_TEXT && !ASMJIT_DISABLE_LOGGER
// Detect ASMJIT_DEBUG and ASMJIT_RELEASE if not forced from outside.
#if !defined(ASMJIT_DEBUG) && !defined(ASMJIT_RELEASE) && !defined(NDEBUG)
# define ASMJIT_DEBUG
#else
# define ASMJIT_RELEASE
#endif
// ASMJIT_EMBED implies ASMJIT_STATIC.
#if defined(ASMJIT_EMBED) && !defined(ASMJIT_STATIC)
# define ASMJIT_STATIC
#endif
// ============================================================================
// [asmjit::Build - VERSION]
// ============================================================================
// [@VERSION{@]
#define ASMJIT_VERSION_MAJOR 1
#define ASMJIT_VERSION_MINOR 0
#define ASMJIT_VERSION_PATCH 0
#define ASMJIT_VERSION_STRING "1.0.0"
// [@VERSION}@]
// ============================================================================
// [asmjit::Build - WIN32]
// ============================================================================
// [@WIN32_CRT_NO_DEPRECATE{@]
#if defined(_MSC_VER) && defined(ASMJIT_EXPORTS)
# if !defined(_CRT_SECURE_NO_DEPRECATE)
# define _CRT_SECURE_NO_DEPRECATE
# endif
# if !defined(_CRT_SECURE_NO_WARNINGS)
# define _CRT_SECURE_NO_WARNINGS
# endif
#endif
// [@WIN32_CRT_NO_DEPRECATE}@]
// [@WIN32_LEAN_AND_MEAN{@]
#if (defined(_WIN32) || defined(_WINDOWS)) && !defined(_WINDOWS_)
# if !defined(WIN32_LEAN_AND_MEAN)
# define WIN32_LEAN_AND_MEAN
# define ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN
# endif
# if !defined(NOMINMAX)
# define NOMINMAX
# define ASMJIT_UNDEF_NOMINMAX
# endif
# include <windows.h>
# if defined(ASMJIT_UNDEF_NOMINMAX)
# undef NOMINMAX
# undef ASMJIT_UNDEF_NOMINMAX
# endif
# if defined(ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN)
# undef WIN32_LEAN_AND_MEAN
# undef ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN
# endif
#endif
// [@WIN32_LEAN_AND_MEAN}@]
// ============================================================================
// [asmjit::Build - OS]
// ============================================================================
// [@OS{@]
#if defined(_WIN32) || defined(_WINDOWS)
#define ASMJIT_OS_WINDOWS (1)
#else
#define ASMJIT_OS_WINDOWS (0)
#endif
#if defined(__APPLE__)
# include <TargetConditionals.h>
# define ASMJIT_OS_MAC (TARGET_OS_MAC)
# define ASMJIT_OS_IOS (TARGET_OS_IPHONE)
#else
# define ASMJIT_OS_MAC (0)
# define ASMJIT_OS_IOS (0)
#endif
#if defined(__ANDROID__)
# define ASMJIT_OS_ANDROID (1)
#else
# define ASMJIT_OS_ANDROID (0)
#endif
#if defined(__linux__) || defined(__ANDROID__)
# define ASMJIT_OS_LINUX (1)
#else
# define ASMJIT_OS_LINUX (0)
#endif
#if defined(__DragonFly__)
# define ASMJIT_OS_DRAGONFLYBSD (1)
#else
# define ASMJIT_OS_DRAGONFLYBSD (0)
#endif
#if defined(__FreeBSD__)
# define ASMJIT_OS_FREEBSD (1)
#else
# define ASMJIT_OS_FREEBSD (0)
#endif
#if defined(__NetBSD__)
# define ASMJIT_OS_NETBSD (1)
#else
# define ASMJIT_OS_NETBSD (0)
#endif
#if defined(__OpenBSD__)
# define ASMJIT_OS_OPENBSD (1)
#else
# define ASMJIT_OS_OPENBSD (0)
#endif
#if defined(__QNXNTO__)
# define ASMJIT_OS_QNX (1)
#else
# define ASMJIT_OS_QNX (0)
#endif
#if defined(__sun)
# define ASMJIT_OS_SOLARIS (1)
#else
# define ASMJIT_OS_SOLARIS (0)
#endif
#if defined(__CYGWIN__)
# define ASMJIT_OS_CYGWIN (1)
#else
# define ASMJIT_OS_CYGWIN (0)
#endif
#define ASMJIT_OS_BSD ( \
ASMJIT_OS_FREEBSD || \
ASMJIT_OS_DRAGONFLYBSD || \
ASMJIT_OS_NETBSD || \
ASMJIT_OS_OPENBSD || \
ASMJIT_OS_MAC)
#define ASMJIT_OS_POSIX (!ASMJIT_OS_WINDOWS)
// [@OS}@]
// ============================================================================
// [asmjit::Build - ARCH]
// ============================================================================
// [@ARCH{@]
// \def ASMJIT_ARCH_ARM32
// True if the target architecture is a 32-bit ARM.
//
// \def ASMJIT_ARCH_ARM64
// True if the target architecture is a 64-bit ARM.
//
// \def ASMJIT_ARCH_X86
// True if the target architecture is a 32-bit X86/IA32
//
// \def ASMJIT_ARCH_X64
// True if the target architecture is a 64-bit X64/AMD64
//
// \def ASMJIT_ARCH_LE
// True if the target architecture is little endian.
//
// \def ASMJIT_ARCH_BE
// True if the target architecture is big endian.
//
// \def ASMJIT_ARCH_64BIT
// True if the target architecture is 64-bit.
#if (defined(_M_X64 ) || defined(__x86_64) || defined(__x86_64__) || \
defined(_M_AMD64) || defined(__amd64 ) || defined(__amd64__ ))
# define ASMJIT_ARCH_X64 1
#else
# define ASMJIT_ARCH_X64 0
#endif
#if (defined(_M_IX86 ) || defined(__X86__ ) || defined(__i386 ) || \
defined(__IA32__) || defined(__I86__ ) || defined(__i386__) || \
defined(__i486__) || defined(__i586__) || defined(__i686__))
# define ASMJIT_ARCH_X86 (!ASMJIT_ARCH_X64)
#else
# define ASMJIT_ARCH_X86 0
#endif
#if defined(__aarch64__)
# define ASMJIT_ARCH_ARM64 1
#else
# define ASMJIT_ARCH_ARM64 0
#endif
#if (defined(_M_ARM ) || defined(__arm ) || defined(__thumb__ ) || \
defined(_M_ARMT ) || defined(__arm__ ) || defined(__thumb2__))
# define ASMJIT_ARCH_ARM32 (!ASMJIT_ARCH_ARM64)
#else
# define ASMJIT_ARCH_ARM32 0
#endif
#define ASMJIT_ARCH_LE ( \
ASMJIT_ARCH_X86 || \
ASMJIT_ARCH_X64 || \
ASMJIT_ARCH_ARM32 || \
ASMJIT_ARCH_ARM64 )
#define ASMJIT_ARCH_BE (!(ASMJIT_ARCH_LE))
#define ASMJIT_ARCH_64BIT (ASMJIT_ARCH_X64 || ASMJIT_ARCH_ARM64)
// [@ARCH}@]
// [@ARCH_UNALIGNED_RW{@]
// \def ASMJIT_ARCH_UNALIGNED_16
// True if the target architecture allows unaligned 16-bit reads and writes.
//
// \def ASMJIT_ARCH_UNALIGNED_32
// True if the target architecture allows unaligned 32-bit reads and writes.
//
// \def ASMJIT_ARCH_UNALIGNED_64
// True if the target architecture allows unaligned 64-bit reads and writes.
#define ASMJIT_ARCH_UNALIGNED_16 (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64)
#define ASMJIT_ARCH_UNALIGNED_32 (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64)
#define ASMJIT_ARCH_UNALIGNED_64 (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64)
// [@ARCH_UNALIGNED_RW}@]
// ============================================================================
// [asmjit::Build - CC]
// ============================================================================
// [@CC{@]
// \def ASMJIT_CC_CLANG
// True if the detected C++ compiler is CLANG (contains normalized CLANG version).
//
// \def ASMJIT_CC_CODEGEAR
// True if the detected C++ compiler is CODEGEAR or BORLAND (version not normalized).
//
// \def ASMJIT_CC_GCC
// True if the detected C++ compiler is GCC (contains normalized GCC version).
//
// \def ASMJIT_CC_MSC
// True if the detected C++ compiler is MSC (contains normalized MSC version).
//
// \def ASMJIT_CC_MINGW
// Defined to 32 or 64 if the compiler is MinGW (32-bit or 64-bit), otherwise 0.
#define ASMJIT_CC_CLANG 0
#define ASMJIT_CC_CODEGEAR 0
#define ASMJIT_CC_GCC 0
#define ASMJIT_CC_MSC 0
#if defined(__CODEGEARC__)
# undef ASMJIT_CC_CODEGEAR
# define ASMJIT_CC_CODEGEAR (__CODEGEARC__)
#elif defined(__BORLANDC__)
# undef ASMJIT_CC_CODEGEAR
# define ASMJIT_CC_CODEGEAR (__BORLANDC__)
#elif defined(__clang__) && defined(__clang_minor__)
# undef ASMJIT_CC_CLANG
# define ASMJIT_CC_CLANG (__clang_major__ * 10000000 + __clang_minor__ * 100000 + __clang_patchlevel__)
#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
# undef ASMJIT_CC_GCC
# define ASMJIT_CC_GCC (__GNUC__ * 10000000 + __GNUC_MINOR__ * 100000 + __GNUC_PATCHLEVEL__)
#elif defined(_MSC_VER) && defined(_MSC_FULL_VER)
# undef ASMJIT_CC_MSC
# if _MSC_VER == _MSC_FULL_VER / 10000
# define ASMJIT_CC_MSC (_MSC_VER * 100000 + (_MSC_FULL_VER % 10000))
# else
# define ASMJIT_CC_MSC (_MSC_VER * 100000 + (_MSC_FULL_VER % 100000))
# endif
#else
# error "[asmjit] Unable to detect the C/C++ compiler."
#endif
#if ASMJIT_CC_GCC && defined(__GXX_EXPERIMENTAL_CXX0X__)
# define ASMJIT_CC_GCC_CXX0X 1
#else
# define ASMJIT_CC_GCC_CXX0X 0
#endif
#if defined(__MINGW64__)
# define ASMJIT_CC_MINGW 64
#elif defined(__MINGW32__)
# define ASMJIT_CC_MINGW 32
#else
# define ASMJIT_CC_MINGW 0
#endif
#define ASMJIT_CC_CODEGEAR_EQ(x, y, z) (ASMJIT_CC_CODEGEAR == (x << 8) + y)
#define ASMJIT_CC_CODEGEAR_GE(x, y, z) (ASMJIT_CC_CODEGEAR >= (x << 8) + y)
#define ASMJIT_CC_CLANG_EQ(x, y, z) (ASMJIT_CC_CLANG == x * 10000000 + y * 100000 + z)
#define ASMJIT_CC_CLANG_GE(x, y, z) (ASMJIT_CC_CLANG >= x * 10000000 + y * 100000 + z)
#define ASMJIT_CC_GCC_EQ(x, y, z) (ASMJIT_CC_GCC == x * 10000000 + y * 100000 + z)
#define ASMJIT_CC_GCC_GE(x, y, z) (ASMJIT_CC_GCC >= x * 10000000 + y * 100000 + z)
#define ASMJIT_CC_MSC_EQ(x, y, z) (ASMJIT_CC_MSC == x * 10000000 + y * 100000 + z)
#define ASMJIT_CC_MSC_GE(x, y, z) (ASMJIT_CC_MSC >= x * 10000000 + y * 100000 + z)
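// For example (illustrative only), a normalized compiler-version check:
//
// ~~~
// #if ASMJIT_CC_GCC_GE(4, 8, 0)
// // Compiled by GCC >= 4.8.0.
// #endif
// ~~~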
// [@CC}@]
// [@CC_FEATURES{@]
// \def ASMJIT_CC_HAS_NATIVE_CHAR
// True if the C++ compiler treats char as a native type.
//
// \def ASMJIT_CC_HAS_NATIVE_WCHAR_T
// True if the C++ compiler treats wchar_t as a native type.
//
// \def ASMJIT_CC_HAS_NATIVE_CHAR16_T
// True if the C++ compiler treats char16_t as a native type.
//
// \def ASMJIT_CC_HAS_NATIVE_CHAR32_T
// True if the C++ compiler treats char32_t as a native type.
//
// \def ASMJIT_CC_HAS_OVERRIDE
// True if the C++ compiler supports override keyword.
//
// \def ASMJIT_CC_HAS_NOEXCEPT
// True if the C++ compiler supports noexcept keyword.
#if ASMJIT_CC_CLANG
# define ASMJIT_CC_HAS_ATTRIBUTE (1)
# define ASMJIT_CC_HAS_BUILTIN (1)
# define ASMJIT_CC_HAS_DECLSPEC (0)
# define ASMJIT_CC_HAS_ALIGNAS (__has_extension(__cxx_alignas__))
# define ASMJIT_CC_HAS_ALIGNOF (__has_extension(__cxx_alignof__))
# define ASMJIT_CC_HAS_ASSUME (0)
# define ASMJIT_CC_HAS_ASSUME_ALIGNED (0)
# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (__has_attribute(__aligned__))
# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(__always_inline__))
# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (__has_attribute(__noinline__))
# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (__has_attribute(__noreturn__))
# define ASMJIT_CC_HAS_BUILTIN_ASSUME (__has_builtin(__builtin_assume))
# define ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED (__has_builtin(__builtin_assume_aligned))
# define ASMJIT_CC_HAS_BUILTIN_EXPECT (__has_builtin(__builtin_expect))
# define ASMJIT_CC_HAS_BUILTIN_UNREACHABLE (__has_builtin(__builtin_unreachable))
# define ASMJIT_CC_HAS_CONSTEXPR (__has_extension(__cxx_constexpr__))
# define ASMJIT_CC_HAS_DECLTYPE (__has_extension(__cxx_decltype__))
# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (__has_extension(__cxx_defaulted_functions__))
# define ASMJIT_CC_HAS_DELETE_FUNCTION (__has_extension(__cxx_deleted_functions__))
# define ASMJIT_CC_HAS_FINAL (__has_extension(__cxx_override_control__))
# define ASMJIT_CC_HAS_INITIALIZER_LIST (__has_extension(__cxx_generalized_initializers__))
# define ASMJIT_CC_HAS_LAMBDA (__has_extension(__cxx_lambdas__))
# define ASMJIT_CC_HAS_NATIVE_CHAR (1)
# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (__has_extension(__cxx_unicode_literals__))
# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (__has_extension(__cxx_unicode_literals__))
# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
# define ASMJIT_CC_HAS_NOEXCEPT (__has_extension(__cxx_noexcept__))
# define ASMJIT_CC_HAS_NULLPTR (__has_extension(__cxx_nullptr__))
# define ASMJIT_CC_HAS_OVERRIDE (__has_extension(__cxx_override_control__))
# define ASMJIT_CC_HAS_RVALUE (__has_extension(__cxx_rvalue_references__))
# define ASMJIT_CC_HAS_STATIC_ASSERT (__has_extension(__cxx_static_assert__))
#endif
#if ASMJIT_CC_CODEGEAR
# define ASMJIT_CC_HAS_ATTRIBUTE (0)
# define ASMJIT_CC_HAS_BUILTIN (0)
# define ASMJIT_CC_HAS_DECLSPEC (1)
# define ASMJIT_CC_HAS_ALIGNAS (0)
# define ASMJIT_CC_HAS_ALIGNOF (0)
# define ASMJIT_CC_HAS_ASSUME (0)
# define ASMJIT_CC_HAS_ASSUME_ALIGNED (0)
# define ASMJIT_CC_HAS_CONSTEXPR (0)
# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (ASMJIT_CC_CODEGEAR >= 0x0610)
# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (0)
# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (0)
# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (ASMJIT_CC_CODEGEAR >= 0x0610)
# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_CODEGEAR >= 0x0610)
# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (0)
# define ASMJIT_CC_HAS_DELETE_FUNCTION (0)
# define ASMJIT_CC_HAS_FINAL (0)
# define ASMJIT_CC_HAS_INITIALIZER_LIST (0)
# define ASMJIT_CC_HAS_LAMBDA (0)
# define ASMJIT_CC_HAS_NATIVE_CHAR (1)
# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (0)
# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (0)
# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
# define ASMJIT_CC_HAS_NOEXCEPT (0)
# define ASMJIT_CC_HAS_NULLPTR (0)
# define ASMJIT_CC_HAS_OVERRIDE (0)
# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_CODEGEAR >= 0x0610)
# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_CODEGEAR >= 0x0610)
#endif
#if ASMJIT_CC_GCC
# define ASMJIT_CC_HAS_ATTRIBUTE (1)
# define ASMJIT_CC_HAS_BUILTIN (1)
# define ASMJIT_CC_HAS_DECLSPEC (0)
# define ASMJIT_CC_HAS_ALIGNAS (ASMJIT_CC_GCC_GE(4, 8, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_ALIGNOF (ASMJIT_CC_GCC_GE(4, 8, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_ASSUME (0)
# define ASMJIT_CC_HAS_ASSUME_ALIGNED (0)
# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (ASMJIT_CC_GCC_GE(2, 7, 0))
# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (ASMJIT_CC_GCC_GE(4, 4, 0) && !ASMJIT_CC_MINGW)
# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (ASMJIT_CC_GCC_GE(3, 4, 0) && !ASMJIT_CC_MINGW)
# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (ASMJIT_CC_GCC_GE(2, 5, 0))
# define ASMJIT_CC_HAS_BUILTIN_ASSUME (0)
# define ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED (ASMJIT_CC_GCC_GE(4, 7, 0))
# define ASMJIT_CC_HAS_BUILTIN_EXPECT (1)
# define ASMJIT_CC_HAS_BUILTIN_UNREACHABLE (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_CONSTEXPR (ASMJIT_CC_GCC_GE(4, 6, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (ASMJIT_CC_GCC_GE(4, 4, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_DELETE_FUNCTION (ASMJIT_CC_GCC_GE(4, 4, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_FINAL (ASMJIT_CC_GCC_GE(4, 7, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_INITIALIZER_LIST (ASMJIT_CC_GCC_GE(4, 4, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_LAMBDA (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_NATIVE_CHAR (1)
# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
# define ASMJIT_CC_HAS_NOEXCEPT (ASMJIT_CC_GCC_GE(4, 6, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_NULLPTR (ASMJIT_CC_GCC_GE(4, 6, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_OVERRIDE (ASMJIT_CC_GCC_GE(4, 7, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_GCC_CXX0X)
# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_GCC_CXX0X)
#endif
#if ASMJIT_CC_MSC
# define ASMJIT_CC_HAS_ATTRIBUTE (0)
# define ASMJIT_CC_HAS_BUILTIN (0)
# define ASMJIT_CC_HAS_DECLSPEC (1)
# define ASMJIT_CC_HAS_ALIGNAS (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_ALIGNOF (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_ASSUME (1)
# define ASMJIT_CC_HAS_ASSUME_ALIGNED (0)
# define ASMJIT_CC_HAS_CONSTEXPR (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (1)
# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (1)
# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (1)
# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (1)
# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_MSC_GE(16, 0, 0))
# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (ASMJIT_CC_MSC_GE(18, 0, 0))
# define ASMJIT_CC_HAS_DELETE_FUNCTION (ASMJIT_CC_MSC_GE(18, 0, 0))
# define ASMJIT_CC_HAS_FINAL (ASMJIT_CC_MSC_GE(14, 0, 0))
# define ASMJIT_CC_HAS_INITIALIZER_LIST (ASMJIT_CC_MSC_GE(18, 0, 0))
# define ASMJIT_CC_HAS_LAMBDA (ASMJIT_CC_MSC_GE(16, 0, 0))
# define ASMJIT_CC_HAS_NATIVE_CHAR (1)
# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (ASMJIT_CC_MSC_GE(19, 0, 0))
# if defined(_NATIVE_WCHAR_T_DEFINED)
# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
# else
# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (0)
# endif
# define ASMJIT_CC_HAS_NOEXCEPT (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_NULLPTR (ASMJIT_CC_MSC_GE(16, 0, 0))
# define ASMJIT_CC_HAS_OVERRIDE (ASMJIT_CC_MSC_GE(14, 0, 0))
# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_MSC_GE(16, 0, 0))
# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_MSC_GE(16, 0, 0))
#endif
#if !ASMJIT_CC_HAS_ATTRIBUTE
# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (0)
# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (0)
# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (0)
# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (0)
#endif
#if !ASMJIT_CC_HAS_BUILTIN
# define ASMJIT_CC_HAS_BUILTIN_ASSUME (0)
# define ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED (0)
# define ASMJIT_CC_HAS_BUILTIN_EXPECT (0)
# define ASMJIT_CC_HAS_BUILTIN_UNREACHABLE (0)
#endif
#if !ASMJIT_CC_HAS_DECLSPEC
# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (0)
# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (0)
# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (0)
# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (0)
#endif
// [@CC_FEATURES}@]
// [@CC_API{@]
// \def ASMJIT_API
// The decorated function is asmjit API and should be exported.
#if !defined(ASMJIT_API)
# if defined(ASMJIT_STATIC)
# define ASMJIT_API
# elif ASMJIT_OS_WINDOWS
# if (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) && !ASMJIT_CC_MINGW
# if defined(ASMJIT_EXPORTS)
# define ASMJIT_API __attribute__((__dllexport__))
# else
# define ASMJIT_API __attribute__((__dllimport__))
# endif
# else
# if defined(ASMJIT_EXPORTS)
# define ASMJIT_API __declspec(dllexport)
# else
# define ASMJIT_API __declspec(dllimport)
# endif
# endif
# else
# if ASMJIT_CC_CLANG || ASMJIT_CC_GCC_GE(4, 0, 0)
# define ASMJIT_API __attribute__((__visibility__("default")))
# endif
# endif
#endif
// [@CC_API}@]
// [@CC_VARAPI{@]
// \def ASMJIT_VARAPI
// The decorated variable is part of asmjit API and is exported.
#if !defined(ASMJIT_VARAPI)
# define ASMJIT_VARAPI extern ASMJIT_API
#endif
// [@CC_VARAPI}@]
// [@CC_VIRTAPI{@]
// \def ASMJIT_VIRTAPI
// The decorated class has a virtual table and is part of asmjit API.
//
// This is basically a workaround. When using MSVC and marking a class as DLL
// export, everything gets exported, which is unwanted in most projects. MSVC
// automatically exports typeinfo and the vtable if at least one symbol of the
// class is exported. GCC, however, doesn't export typeinfo even if one or
// more symbols are exported, unless the class itself is decorated with
// "visibility(default)" (i.e. ASMJIT_API).
#if (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) && !ASMJIT_OS_WINDOWS
# define ASMJIT_VIRTAPI ASMJIT_API
#else
# define ASMJIT_VIRTAPI
#endif
// [@CC_VIRTAPI}@]
// [@CC_INLINE{@]
// \def ASMJIT_INLINE
// Always inline the decorated function.
#if ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE
# define ASMJIT_INLINE inline __attribute__((__always_inline__))
#elif ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE
# define ASMJIT_INLINE __forceinline
#else
# define ASMJIT_INLINE inline
#endif
// [@CC_INLINE}@]
// [@CC_NOINLINE{@]
// \def ASMJIT_NOINLINE
// Never inline the decorated function.
#if ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE
# define ASMJIT_NOINLINE __attribute__((__noinline__))
#elif ASMJIT_CC_HAS_DECLSPEC_NOINLINE
# define ASMJIT_NOINLINE __declspec(noinline)
#else
# define ASMJIT_NOINLINE
#endif
// [@CC_NOINLINE}@]
// [@CC_NORETURN{@]
// \def ASMJIT_NORETURN
// The decorated function never returns (exit, assertion failure, etc...).
#if ASMJIT_CC_HAS_ATTRIBUTE_NORETURN
# define ASMJIT_NORETURN __attribute__((__noreturn__))
#elif ASMJIT_CC_HAS_DECLSPEC_NORETURN
# define ASMJIT_NORETURN __declspec(noreturn)
#else
# define ASMJIT_NORETURN
#endif
// [@CC_NORETURN}@]
// [@CC_CDECL{@]
// \def ASMJIT_CDECL
// Standard C function calling convention decorator (__cdecl).
#if ASMJIT_ARCH_X86
# if ASMJIT_CC_HAS_ATTRIBUTE
# define ASMJIT_CDECL __attribute__((__cdecl__))
# else
# define ASMJIT_CDECL __cdecl
# endif
#else
# define ASMJIT_CDECL
#endif
// [@CC_CDECL}@]
// [@CC_STDCALL{@]
// \def ASMJIT_STDCALL
// StdCall function calling convention decorator (__stdcall).
#if ASMJIT_ARCH_X86
# if ASMJIT_CC_HAS_ATTRIBUTE
# define ASMJIT_STDCALL __attribute__((__stdcall__))
# else
# define ASMJIT_STDCALL __stdcall
# endif
#else
# define ASMJIT_STDCALL
#endif
// [@CC_STDCALL}@]
// [@CC_FASTCALL{@]
// \def ASMJIT_FASTCALL
// FastCall function calling convention decorator (__fastcall).
#if ASMJIT_ARCH_X86
# if ASMJIT_CC_HAS_ATTRIBUTE
# define ASMJIT_FASTCALL __attribute__((__fastcall__))
# else
# define ASMJIT_FASTCALL __fastcall
# endif
#else
# define ASMJIT_FASTCALL
#endif
// [@CC_FASTCALL}@]
// [@CC_REGPARM{@]
// \def ASMJIT_REGPARM(n)
// A custom calling convention which passes n arguments in registers.
#if ASMJIT_ARCH_X86 && (ASMJIT_CC_GCC || ASMJIT_CC_CLANG)
# define ASMJIT_REGPARM(n) __attribute__((__regparm__(n)))
#else
# define ASMJIT_REGPARM(n)
#endif
// [@CC_REGPARM}@]
// [@CC_NOEXCEPT{@]
// \def ASMJIT_NOEXCEPT
// The decorated function never throws an exception (noexcept).
#if ASMJIT_CC_HAS_NOEXCEPT
# define ASMJIT_NOEXCEPT noexcept
#else
# define ASMJIT_NOEXCEPT
#endif
// [@CC_NOEXCEPT}@]
// [@CC_NOP{@]
// \def ASMJIT_NOP
// No operation.
#if !defined(ASMJIT_NOP)
# define ASMJIT_NOP ((void)0)
#endif
// [@CC_NOP}@]
// [@CC_ASSUME{@]
// \def ASMJIT_ASSUME(exp)
// Assume that the expression exp is always true.
#if ASMJIT_CC_HAS_ASSUME
# define ASMJIT_ASSUME(exp) __assume(exp)
#elif ASMJIT_CC_HAS_BUILTIN_ASSUME
# define ASMJIT_ASSUME(exp) __builtin_assume(exp)
#elif ASMJIT_CC_HAS_BUILTIN_UNREACHABLE
# define ASMJIT_ASSUME(exp) do { if (!(exp)) __builtin_unreachable(); } while (0)
#else
# define ASMJIT_ASSUME(exp) ((void)0)
#endif
// [@CC_ASSUME}@]
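// For example (illustrative only), `ASMJIT_ASSUME(size > 0);` tells the
// optimizer that `size` is non-zero on every path that reaches it, and it
// compiles to a no-op when no suitable intrinsic is available.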
// [@CC_ASSUME_ALIGNED{@]
// \def ASMJIT_ASSUME_ALIGNED(p, alignment)
// Assume that the pointer 'p' is aligned to at least 'alignment' bytes.
#if ASMJIT_CC_HAS_ASSUME_ALIGNED
# define ASMJIT_ASSUME_ALIGNED(p, alignment) __assume_aligned(p, alignment)
#elif ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED
# define ASMJIT_ASSUME_ALIGNED(p, alignment) p = __builtin_assume_aligned(p, alignment)
#else
# define ASMJIT_ASSUME_ALIGNED(p, alignment) ((void)0)
#endif
// [@CC_ASSUME_ALIGNED}@]
// [@CC_EXPECT{@]
// \def ASMJIT_LIKELY(exp)
// Expression exp is likely to be true.
//
// \def ASMJIT_UNLIKELY(exp)
// Expression exp is likely to be false.
#if ASMJIT_CC_HAS_BUILTIN_EXPECT
# define ASMJIT_LIKELY(exp) __builtin_expect(!!(exp), 1)
# define ASMJIT_UNLIKELY(exp) __builtin_expect(!!(exp), 0)
#else
# define ASMJIT_LIKELY(exp) exp
# define ASMJIT_UNLIKELY(exp) exp
#endif
// [@CC_EXPECT}@]
// [@CC_FALLTHROUGH{@]
// \def ASMJIT_FALLTHROUGH
// Annotates that the code intentionally falls through to the next case (switch / case).
#if ASMJIT_CC_CLANG && __cplusplus >= 201103L
# define ASMJIT_FALLTHROUGH [[clang::fallthrough]]
#else
# define ASMJIT_FALLTHROUGH (void)0
#endif
// [@CC_FALLTHROUGH}@]
// [@CC_UNUSED{@]
// \def ASMJIT_UNUSED(x)
// Mark a variable x as unused.
#define ASMJIT_UNUSED(x) (void)(x)
// [@CC_UNUSED}@]
// [@CC_OFFSET_OF{@]
// \def ASMJIT_OFFSET_OF(x, y).
// Get the offset of a member y of a struct x at compile-time.
#define ASMJIT_OFFSET_OF(x, y) ((int)(intptr_t)((const char*)&((const x*)0x1)->y) - 1)
// [@CC_OFFSET_OF}@]
// [@CC_ARRAY_SIZE{@]
// \def ASMJIT_ARRAY_SIZE(x)
// Get the array size of x at compile-time.
#define ASMJIT_ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
// [@CC_ARRAY_SIZE}@]
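// For example (illustrative only):
//
// ~~~
// struct Pair { int first, second; };        // Hypothetical type.
// static const uint32_t table[4] = { 0, 1, 2, 3 };
//
// size_t count = ASMJIT_ARRAY_SIZE(table);   // count == 4.
// int off = ASMJIT_OFFSET_OF(Pair, second);  // Offset of `second` in `Pair`.
// ~~~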
// ============================================================================
// [asmjit::Build - STDTYPES]
// ============================================================================
// [@STDTYPES{@]
#if defined(__MINGW32__) || defined(__MINGW64__)
# include <sys/types.h>
#endif
#if defined(_MSC_VER) && (_MSC_VER < 1600)
# include <limits.h>
# if !defined(ASMJIT_SUPPRESS_STD_TYPES)
# if (_MSC_VER < 1300)
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef signed __int64 int64_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
typedef unsigned __int64 uint64_t;
# else
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
# endif
# endif
# define ASMJIT_INT64_C(x) (x##i64)
# define ASMJIT_UINT64_C(x) (x##ui64)
#else
# include <stdint.h>
# include <limits.h>
# define ASMJIT_INT64_C(x) (x##ll)
# define ASMJIT_UINT64_C(x) (x##ull)
#endif
// [@STDTYPES}@]
// ============================================================================
// [asmjit::Build - Dependencies]
// ============================================================================
#include <new>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#if ASMJIT_OS_POSIX
# include <pthread.h>
#endif // ASMJIT_OS_POSIX
// ============================================================================
// [asmjit::Build - Additional]
// ============================================================================
// Build host architecture if no architecture is selected.
#if !defined(ASMJIT_BUILD_HOST) && \
!defined(ASMJIT_BUILD_X86) && \
!defined(ASMJIT_BUILD_X64)
# define ASMJIT_BUILD_HOST
#endif
// Autodetect host architecture if enabled.
#if defined(ASMJIT_BUILD_HOST)
# if ASMJIT_ARCH_X86 && !defined(ASMJIT_BUILD_X86)
# define ASMJIT_BUILD_X86
# endif // ASMJIT_ARCH_X86 && !ASMJIT_BUILD_X86
# if ASMJIT_ARCH_X64 && !defined(ASMJIT_BUILD_X64)
# define ASMJIT_BUILD_X64
# endif // ASMJIT_ARCH_X64 && !ASMJIT_BUILD_X64
#endif // ASMJIT_BUILD_HOST
#if defined(_MSC_VER) && _MSC_VER >= 1400
# define ASMJIT_ENUM(name) enum name : uint32_t
#else
# define ASMJIT_ENUM(name) enum name
#endif
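// Illustrative expansion: `ASMJIT_ENUM(VMemFlags) { ... };` becomes
// `enum VMemFlags : uint32_t { ... };` on compilers that support typed enums
// (MSVC >= 2005 here) and a plain `enum VMemFlags { ... };` elsewhere.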
#if ASMJIT_ARCH_LE
# define _ASMJIT_ARCH_INDEX(total, index) (index)
#else
# define _ASMJIT_ARCH_INDEX(total, index) ((total) - 1 - (index))
#endif
#if !defined(ASMJIT_ALLOC) && !defined(ASMJIT_REALLOC) && !defined(ASMJIT_FREE)
# define ASMJIT_ALLOC(size) ::malloc(size)
# define ASMJIT_REALLOC(ptr, size) ::realloc(ptr, size)
# define ASMJIT_FREE(ptr) ::free(ptr)
#else
# if !defined(ASMJIT_ALLOC) || !defined(ASMJIT_REALLOC) || !defined(ASMJIT_FREE)
# error "[asmjit] You must provide ASMJIT_ALLOC, ASMJIT_REALLOC and ASMJIT_FREE."
# endif
#endif // !ASMJIT_ALLOC && !ASMJIT_REALLOC && !ASMJIT_FREE
#define ASMJIT_NO_COPY(...) \
private: \
ASMJIT_INLINE __VA_ARGS__(const __VA_ARGS__& other) ASMJIT_NOEXCEPT; \
ASMJIT_INLINE __VA_ARGS__& operator=(const __VA_ARGS__& other) ASMJIT_NOEXCEPT; \
public:
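// Illustrative use of ASMJIT_NO_COPY inside a class body (the class name is
// hypothetical):
//
// ~~~
// class CodeBuffer {
// public:
//   CodeBuffer() {}
//   ASMJIT_NO_COPY(CodeBuffer)
// };
// ~~~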
// ============================================================================
// [asmjit::Build - Relative Path]
// ============================================================================
namespace asmjit {
namespace DebugUtils {
// Workaround used to convert an absolute path to a relative one at the C
// macro level, used by asserts and tracing. The workaround is needed because
// some build systems always refer to source files by absolute paths. Note
// that this doesn't remove absolute paths from the compiled binary, so they
// can still be considered a security risk.
enum {
kSourceRelativePathOffset = int(sizeof(__FILE__) - sizeof("asmjit/build.h"))
};
// ASMJIT_TRACE is only used by sources and private headers. It's safe to make
// it unavailable outside of AsmJit.
#if defined(ASMJIT_EXPORTS)
static inline int disabledTrace(...) { return 0; }
# if defined(ASMJIT_TRACE)
# define ASMJIT_TSEC(section) section
# define ASMJIT_TLOG ::printf
# else
# define ASMJIT_TSEC(section) ASMJIT_NOP
# define ASMJIT_TLOG 0 && ::asmjit::DebugUtils::disabledTrace
# endif // ASMJIT_TRACE
#endif // ASMJIT_EXPORTS
} // DebugUtils namespace
} // asmjit namespace
// ============================================================================
// [asmjit::Build - Test]
// ============================================================================
// Include a unit testing package if this is an `asmjit_test` build.
#if defined(ASMJIT_TEST)
# include "../test/broken.h"
#endif // ASMJIT_TEST
// [Guard]
#endif // _ASMJIT_BUILD_H

53
DynamicHooks/thirdparty/AsmJit/host.h vendored Normal file
View File

@ -0,0 +1,53 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_HOST_H
#define _ASMJIT_HOST_H
// [Dependencies]
#include "./base.h"
// [X86 / X64]
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
#include "./x86.h"
namespace asmjit {
// Define `asmjit::host` namespace wrapping `asmjit::x86`.
namespace host { using namespace ::asmjit::x86; }
// Define host assembler.
typedef X86Assembler HostAssembler;
// Define host operands.
typedef X86GpReg GpReg;
typedef X86FpReg FpReg;
typedef X86MmReg MmReg;
typedef X86XmmReg XmmReg;
typedef X86YmmReg YmmReg;
typedef X86SegReg SegReg;
typedef X86Mem Mem;
// Define host compiler and related.
#if !defined(ASMJIT_DISABLE_COMPILER)
typedef X86Compiler HostCompiler;
typedef X86CallNode HostCallNode;
typedef X86FuncDecl HostFuncDecl;
typedef X86FuncNode HostFuncNode;
typedef X86GpVar GpVar;
typedef X86MmVar MmVar;
typedef X86XmmVar XmmVar;
typedef X86YmmVar YmmVar;
#endif // !ASMJIT_DISABLE_COMPILER
} // asmjit namespace
#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
// [Guard]
#endif // _ASMJIT_HOST_H

21
DynamicHooks/thirdparty/AsmJit/x86.h vendored Normal file
View File

@ -0,0 +1,21 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_H
#define _ASMJIT_X86_H
// [Dependencies]
#include "./base.h"
#include "./x86/x86assembler.h"
#include "./x86/x86compiler.h"
#include "./x86/x86compilerfunc.h"
#include "./x86/x86inst.h"
#include "./x86/x86operand.h"
// [Guard]
#endif // _ASMJIT_X86_H

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,860 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../build.h"
#if !defined(ASMJIT_DISABLE_COMPILER) && (defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64))
// [Dependencies]
#include "../base/containers.h"
#include "../base/utils.h"
#include "../x86/x86assembler.h"
#include "../x86/x86compiler.h"
#include "../x86/x86compilercontext_p.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [Debug]
// ============================================================================
#if !defined(ASMJIT_DEBUG)
#define ASMJIT_ASSERT_OPERAND(op) \
do {} while(0)
#else
#define ASMJIT_ASSERT_OPERAND(op) \
do { \
if (op.isVar() || op.isLabel()) { \
ASMJIT_ASSERT(op.getId() != kInvalidValue); \
} \
} while(0)
#endif
// ============================================================================
// [asmjit::X86VarInfo]
// ============================================================================
#define F(flag) VarInfo::kFlag##flag
const VarInfo _x86VarInfo[] = {
{ kVarTypeInt8 , 1 , kX86RegClassGp , kX86RegTypeGpbLo, 0 , "gpb" },
{ kVarTypeUInt8 , 1 , kX86RegClassGp , kX86RegTypeGpbLo, 0 , "gpb" },
{ kVarTypeInt16 , 2 , kX86RegClassGp , kX86RegTypeGpw , 0 , "gpw" },
{ kVarTypeUInt16 , 2 , kX86RegClassGp , kX86RegTypeGpw , 0 , "gpw" },
{ kVarTypeInt32 , 4 , kX86RegClassGp , kX86RegTypeGpd , 0 , "gpd" },
{ kVarTypeUInt32 , 4 , kX86RegClassGp , kX86RegTypeGpd , 0 , "gpd" },
{ kVarTypeInt64 , 8 , kX86RegClassGp , kX86RegTypeGpq , 0 , "gpq" },
{ kVarTypeUInt64 , 8 , kX86RegClassGp , kX86RegTypeGpq , 0 , "gpq" },
{ kVarTypeIntPtr , 0 , kX86RegClassGp , 0 , 0 , "" }, // Abstract.
{ kVarTypeUIntPtr , 0 , kX86RegClassGp , 0 , 0 , "" }, // Abstract.
{ kVarTypeFp32 , 4 , kX86RegClassFp , kX86RegTypeFp , F(SP) , "fp" },
{ kVarTypeFp64 , 8 , kX86RegClassFp , kX86RegTypeFp , F(DP) , "fp" },
{ kX86VarTypeMm , 8 , kX86RegClassMm , kX86RegTypeMm , 0 | F(SIMD), "mm" },
{ kX86VarTypeK , 8 , kX86RegClassK , kX86RegTypeK , 0 , "k" },
{ kX86VarTypeXmm , 16, kX86RegClassXyz, kX86RegTypeXmm , 0 | F(SIMD), "xmm" },
{ kX86VarTypeXmmSs, 4 , kX86RegClassXyz, kX86RegTypeXmm , F(SP) , "xmm" },
{ kX86VarTypeXmmPs, 16, kX86RegClassXyz, kX86RegTypeXmm , F(SP) | F(SIMD), "xmm" },
{ kX86VarTypeXmmSd, 8 , kX86RegClassXyz, kX86RegTypeXmm , F(DP) , "xmm" },
{ kX86VarTypeXmmPd, 16, kX86RegClassXyz, kX86RegTypeXmm , F(DP) | F(SIMD), "xmm" },
{ kX86VarTypeYmm , 32, kX86RegClassXyz, kX86RegTypeYmm , 0 | F(SIMD), "ymm" },
{ kX86VarTypeYmmPs, 32, kX86RegClassXyz, kX86RegTypeYmm , F(SP) | F(SIMD), "ymm" },
{ kX86VarTypeYmmPd, 32, kX86RegClassXyz, kX86RegTypeYmm , F(DP) | F(SIMD), "ymm" },
{ kX86VarTypeZmm , 64, kX86RegClassXyz, kX86RegTypeZmm , 0 | F(SIMD), "zmm" },
{ kX86VarTypeZmmPs, 64, kX86RegClassXyz, kX86RegTypeZmm , F(SP) | F(SIMD), "zmm" },
{ kX86VarTypeZmmPd, 64, kX86RegClassXyz, kX86RegTypeZmm , F(DP) | F(SIMD), "zmm" }
};
#undef F
#if defined(ASMJIT_BUILD_X86)
const uint8_t _x86VarMapping[kX86VarTypeCount] = {
/* 00: kVarTypeInt8 */ kVarTypeInt8,
/* 01: kVarTypeUInt8 */ kVarTypeUInt8,
/* 02: kVarTypeInt16 */ kVarTypeInt16,
/* 03: kVarTypeUInt16 */ kVarTypeUInt16,
/* 04: kVarTypeInt32 */ kVarTypeInt32,
/* 05: kVarTypeUInt32 */ kVarTypeUInt32,
/* 06: kVarTypeInt64 */ kInvalidVar, // Invalid in 32-bit mode.
/* 07: kVarTypeUInt64 */ kInvalidVar, // Invalid in 32-bit mode.
/* 08: kVarTypeIntPtr */ kVarTypeInt32, // Remapped to Int32.
/* 09: kVarTypeUIntPtr */ kVarTypeUInt32, // Remapped to UInt32.
/* 10: kVarTypeFp32 */ kVarTypeFp32,
/* 11: kVarTypeFp64 */ kVarTypeFp64,
/* 12: kX86VarTypeMm */ kX86VarTypeMm,
/* 13: kX86VarTypeK */ kX86VarTypeK,
/* 14: kX86VarTypeXmm */ kX86VarTypeXmm,
/* 15: kX86VarTypeXmmSs */ kX86VarTypeXmmSs,
/* 16: kX86VarTypeXmmPs */ kX86VarTypeXmmPs,
/* 17: kX86VarTypeXmmSd */ kX86VarTypeXmmSd,
/* 18: kX86VarTypeXmmPd */ kX86VarTypeXmmPd,
/* 19: kX86VarTypeYmm */ kX86VarTypeYmm,
/* 20: kX86VarTypeYmmPs */ kX86VarTypeYmmPs,
/* 21: kX86VarTypeYmmPd */ kX86VarTypeYmmPd,
/* 22: kX86VarTypeZmm */ kX86VarTypeZmm,
/* 23: kX86VarTypeZmmPs */ kX86VarTypeZmmPs,
/* 24: kX86VarTypeZmmPd */ kX86VarTypeZmmPd
};
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_X64)
const uint8_t _x64VarMapping[kX86VarTypeCount] = {
/* 00: kVarTypeInt8 */ kVarTypeInt8,
/* 01: kVarTypeUInt8 */ kVarTypeUInt8,
/* 02: kVarTypeInt16 */ kVarTypeInt16,
/* 03: kVarTypeUInt16 */ kVarTypeUInt16,
/* 04: kVarTypeInt32 */ kVarTypeInt32,
/* 05: kVarTypeUInt32 */ kVarTypeUInt32,
/* 06: kVarTypeInt64 */ kVarTypeInt64,
/* 07: kVarTypeUInt64 */ kVarTypeUInt64,
/* 08: kVarTypeIntPtr */ kVarTypeInt64, // Remapped to Int64.
/* 09: kVarTypeUIntPtr */ kVarTypeUInt64, // Remapped to UInt64.
/* 10: kVarTypeFp32 */ kVarTypeFp32,
/* 11: kVarTypeFp64 */ kVarTypeFp64,
/* 12: kX86VarTypeMm */ kX86VarTypeMm,
/* 13: kX86VarTypeK */ kX86VarTypeK,
/* 14: kX86VarTypeXmm */ kX86VarTypeXmm,
/* 15: kX86VarTypeXmmSs */ kX86VarTypeXmmSs,
/* 16: kX86VarTypeXmmPs */ kX86VarTypeXmmPs,
/* 17: kX86VarTypeXmmSd */ kX86VarTypeXmmSd,
/* 18: kX86VarTypeXmmPd */ kX86VarTypeXmmPd,
/* 19: kX86VarTypeYmm */ kX86VarTypeYmm,
/* 20: kX86VarTypeYmmPs */ kX86VarTypeYmmPs,
/* 21: kX86VarTypeYmmPd */ kX86VarTypeYmmPd,
/* 22: kX86VarTypeZmm */ kX86VarTypeZmm,
/* 23: kX86VarTypeZmmPs */ kX86VarTypeZmmPs,
/* 24: kX86VarTypeZmmPd */ kX86VarTypeZmmPd
};
#endif // ASMJIT_BUILD_X64
// ============================================================================
// [asmjit::X86CallNode - Arg / Ret]
// ============================================================================
bool X86CallNode::_setArg(uint32_t i, const Operand& op) noexcept {
if ((i & ~kFuncArgHi) >= _x86Decl.getNumArgs())
return false;
_args[i] = op;
return true;
}
bool X86CallNode::_setRet(uint32_t i, const Operand& op) noexcept {
if (i >= 2)
return false;
_ret[i] = op;
return true;
}
// ============================================================================
// [asmjit::X86Compiler - Construction / Destruction]
// ============================================================================
X86Compiler::X86Compiler(X86Assembler* assembler) noexcept
: Compiler(),
zax(NoInit),
zcx(NoInit),
zdx(NoInit),
zbx(NoInit),
zsp(NoInit),
zbp(NoInit),
zsi(NoInit),
zdi(NoInit) {
_regCount.reset();
zax = x86::noGpReg;
zcx = x86::noGpReg;
zdx = x86::noGpReg;
zbx = x86::noGpReg;
zsp = x86::noGpReg;
zbp = x86::noGpReg;
zsi = x86::noGpReg;
zdi = x86::noGpReg;
if (assembler != nullptr)
attach(assembler);
}
X86Compiler::~X86Compiler() noexcept {
reset(true);
}
// ============================================================================
// [asmjit::X86Compiler - Attach / Reset]
// ============================================================================
Error X86Compiler::attach(Assembler* assembler) noexcept {
ASMJIT_ASSERT(assembler != nullptr);
if (_assembler != nullptr)
return kErrorInvalidState;
uint32_t arch = assembler->getArch();
switch (arch) {
#if defined(ASMJIT_BUILD_X86)
case kArchX86:
_targetVarMapping = _x86VarMapping;
break;
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_X64)
case kArchX64:
_targetVarMapping = _x64VarMapping;
break;
#endif // ASMJIT_BUILD_X64
default:
return kErrorInvalidArch;
}
assembler->_attached(this);
_arch = static_cast<uint8_t>(arch);
_regSize = static_cast<uint8_t>(assembler->getRegSize());
_regCount = static_cast<X86Assembler*>(assembler)->getRegCount();
_finalized = false;
zax = static_cast<X86Assembler*>(assembler)->zax;
zcx = static_cast<X86Assembler*>(assembler)->zcx;
zdx = static_cast<X86Assembler*>(assembler)->zdx;
zbx = static_cast<X86Assembler*>(assembler)->zbx;
zsp = static_cast<X86Assembler*>(assembler)->zsp;
zbp = static_cast<X86Assembler*>(assembler)->zbp;
zsi = static_cast<X86Assembler*>(assembler)->zsi;
zdi = static_cast<X86Assembler*>(assembler)->zdi;
return kErrorOk;
}
void X86Compiler::reset(bool releaseMemory) noexcept {
Compiler::reset(releaseMemory);
_regCount.reset();
zax = x86::noGpReg;
zcx = x86::noGpReg;
zdx = x86::noGpReg;
zbx = x86::noGpReg;
zsp = x86::noGpReg;
zbp = x86::noGpReg;
zsi = x86::noGpReg;
zdi = x86::noGpReg;
}
// ============================================================================
// [asmjit::X86Compiler - Finalize]
// ============================================================================
Error X86Compiler::finalize() noexcept {
X86Assembler* assembler = getAssembler();
if (assembler == nullptr)
return kErrorOk;
// Flush the global constant pool.
if (_globalConstPoolLabel.isInitialized()) {
embedConstPool(_globalConstPoolLabel, _globalConstPool);
_globalConstPoolLabel.reset();
_globalConstPool.reset();
}
if (_firstNode == nullptr)
return kErrorOk;
X86Context context(this);
Error error = kErrorOk;
HLNode* node = _firstNode;
HLNode* start;
// Find all functions and use the `X86Context` to translate/emit them.
do {
start = node;
_resetTokenGenerator();
if (node->getType() == HLNode::kTypeFunc) {
node = static_cast<X86FuncNode*>(start)->getEnd();
error = context.compile(static_cast<X86FuncNode*>(start));
if (error != kErrorOk)
break;
}
do {
node = node->getNext();
} while (node != nullptr && node->getType() != HLNode::kTypeFunc);
error = context.serialize(assembler, start, node);
context.cleanup();
context.reset(false);
if (error != kErrorOk)
break;
} while (node != nullptr);
reset(false);
return error;
}
// ============================================================================
// [asmjit::X86Compiler - Inst]
// ============================================================================
//! Get compiler instruction item size without operands assigned.
static ASMJIT_INLINE size_t X86Compiler_getInstSize(uint32_t code) noexcept {
return Utils::inInterval<uint32_t>(code, _kX86InstIdJbegin, _kX86InstIdJend) ? sizeof(HLJump) : sizeof(HLInst);
}
static HLInst* X86Compiler_newInst(X86Compiler* self, void* p, uint32_t code, uint32_t options, Operand* opList, uint32_t opCount) noexcept {
if (Utils::inInterval<uint32_t>(code, _kX86InstIdJbegin, _kX86InstIdJend)) {
HLJump* node = new(p) HLJump(self, code, options, opList, opCount);
HLLabel* jTarget = nullptr;
if ((options & kInstOptionUnfollow) == 0) {
if (opList[0].isLabel())
jTarget = self->getHLLabel(static_cast<Label&>(opList[0]));
else
options |= kInstOptionUnfollow;
}
node->orFlags(code == kX86InstIdJmp ? HLNode::kFlagIsJmp | HLNode::kFlagIsTaken : HLNode::kFlagIsJcc);
node->_target = jTarget;
node->_jumpNext = nullptr;
if (jTarget) {
node->_jumpNext = static_cast<HLJump*>(jTarget->_from);
jTarget->_from = node;
jTarget->addNumRefs();
}
// A 'jmp' is always taken; a conditional jump can carry a hint, which we detect here.
if (code == kX86InstIdJmp)
node->orFlags(HLNode::kFlagIsTaken);
else if (options & kInstOptionTaken)
node->orFlags(HLNode::kFlagIsTaken);
node->addOptions(options);
return node;
}
else {
HLInst* node = new(p) HLInst(self, code, options, opList, opCount);
node->addOptions(options);
return node;
}
}
HLInst* X86Compiler::newInst(uint32_t code) noexcept {
size_t size = X86Compiler_getInstSize(code);
HLInst* inst = static_cast<HLInst*>(_zoneAllocator.alloc(size));
if (inst == nullptr)
goto _NoMemory;
return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), nullptr, 0);
_NoMemory:
setLastError(kErrorNoHeapMemory);
return nullptr;
}
HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0) noexcept {
size_t size = X86Compiler_getInstSize(code);
HLInst* inst = static_cast<HLInst*>(_zoneAllocator.alloc(size + 1 * sizeof(Operand)));
if (inst == nullptr)
goto _NoMemory;
{
Operand* opList = reinterpret_cast<Operand*>(reinterpret_cast<uint8_t*>(inst) + size);
opList[0] = o0;
ASMJIT_ASSERT_OPERAND(o0);
return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), opList, 1);
}
_NoMemory:
setLastError(kErrorNoHeapMemory);
return nullptr;
}
HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1) noexcept {
size_t size = X86Compiler_getInstSize(code);
HLInst* inst = static_cast<HLInst*>(_zoneAllocator.alloc(size + 2 * sizeof(Operand)));
if (inst == nullptr)
goto _NoMemory;
{
Operand* opList = reinterpret_cast<Operand*>(reinterpret_cast<uint8_t*>(inst) + size);
opList[0] = o0;
opList[1] = o1;
ASMJIT_ASSERT_OPERAND(o0);
ASMJIT_ASSERT_OPERAND(o1);
return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), opList, 2);
}
_NoMemory:
setLastError(kErrorNoHeapMemory);
return nullptr;
}
HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) noexcept {
size_t size = X86Compiler_getInstSize(code);
HLInst* inst = static_cast<HLInst*>(_zoneAllocator.alloc(size + 3 * sizeof(Operand)));
if (inst == nullptr)
goto _NoMemory;
{
Operand* opList = reinterpret_cast<Operand*>(reinterpret_cast<uint8_t*>(inst) + size);
opList[0] = o0;
opList[1] = o1;
opList[2] = o2;
ASMJIT_ASSERT_OPERAND(o0);
ASMJIT_ASSERT_OPERAND(o1);
ASMJIT_ASSERT_OPERAND(o2);
return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), opList, 3);
}
_NoMemory:
setLastError(kErrorNoHeapMemory);
return nullptr;
}
HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) noexcept {
size_t size = X86Compiler_getInstSize(code);
HLInst* inst = static_cast<HLInst*>(_zoneAllocator.alloc(size + 4 * sizeof(Operand)));
if (inst == nullptr)
goto _NoMemory;
{
Operand* opList = reinterpret_cast<Operand*>(reinterpret_cast<uint8_t*>(inst) + size);
opList[0] = o0;
opList[1] = o1;
opList[2] = o2;
opList[3] = o3;
ASMJIT_ASSERT_OPERAND(o0);
ASMJIT_ASSERT_OPERAND(o1);
ASMJIT_ASSERT_OPERAND(o2);
ASMJIT_ASSERT_OPERAND(o3);
return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), opList, 4);
}
_NoMemory:
setLastError(kErrorNoHeapMemory);
return nullptr;
}
HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4) noexcept {
size_t size = X86Compiler_getInstSize(code);
HLInst* inst = static_cast<HLInst*>(_zoneAllocator.alloc(size + 5 * sizeof(Operand)));
if (inst == nullptr)
goto _NoMemory;
{
Operand* opList = reinterpret_cast<Operand*>(reinterpret_cast<uint8_t*>(inst) + size);
opList[0] = o0;
opList[1] = o1;
opList[2] = o2;
opList[3] = o3;
opList[4] = o4;
ASMJIT_ASSERT_OPERAND(o0);
ASMJIT_ASSERT_OPERAND(o1);
ASMJIT_ASSERT_OPERAND(o2);
ASMJIT_ASSERT_OPERAND(o3);
ASMJIT_ASSERT_OPERAND(o4);
return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), opList, 5);
}
_NoMemory:
setLastError(kErrorNoHeapMemory);
return nullptr;
}
HLInst* X86Compiler::emit(uint32_t code) noexcept {
HLInst* node = newInst(code);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0) noexcept {
HLInst* node = newInst(code, o0);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1) noexcept {
HLInst* node = newInst(code, o0, o1);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) noexcept {
HLInst* node = newInst(code, o0, o1, o2);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) noexcept {
HLInst* node = newInst(code, o0, o1, o2, o3);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4) noexcept {
HLInst* node = newInst(code, o0, o1, o2, o3, o4);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, int o0_) noexcept {
Imm o0(o0_);
HLInst* node = newInst(code, o0);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, uint64_t o0_) noexcept {
Imm o0(o0_);
HLInst* node = newInst(code, o0);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, int o1_) noexcept {
Imm o1(o1_);
HLInst* node = newInst(code, o0, o1);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, uint64_t o1_) noexcept {
Imm o1(o1_);
HLInst* node = newInst(code, o0, o1);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, int o2_) noexcept {
Imm o2(o2_);
HLInst* node = newInst(code, o0, o1, o2);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, uint64_t o2_) noexcept {
Imm o2(o2_);
HLInst* node = newInst(code, o0, o1, o2);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, int o3_) noexcept {
Imm o3(o3_);
HLInst* node = newInst(code, o0, o1, o2, o3);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, uint64_t o3_) noexcept {
Imm o3(o3_);
HLInst* node = newInst(code, o0, o1, o2, o3);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
// ============================================================================
// [asmjit::X86Compiler - Func]
// ============================================================================
X86FuncNode* X86Compiler::newFunc(const FuncPrototype& p) noexcept {
X86FuncNode* func = newNode<X86FuncNode>();
Error error;
if (func == nullptr)
goto _NoMemory;
// Create helper nodes.
func->_entryNode = newLabelNode();
func->_exitNode = newLabelNode();
func->_end = newNode<HLSentinel>();
if (func->_entryNode == nullptr || func->_exitNode == nullptr || func->_end == nullptr)
goto _NoMemory;
// Function prototype.
if ((error = func->_x86Decl.setPrototype(p)) != kErrorOk) {
setLastError(error);
return nullptr;
}
// Function arguments stack size. Since the function node requires
// _argStackSize to be set, we copy it from X86FuncDecl.
func->_argStackSize = func->_x86Decl.getArgStackSize();
func->_redZoneSize = static_cast<uint16_t>(func->_x86Decl.getRedZoneSize());
func->_spillZoneSize = static_cast<uint16_t>(func->_x86Decl.getSpillZoneSize());
// Expected/Required stack alignment.
func->_expectedStackAlignment = getRuntime()->getStackAlignment();
func->_requiredStackAlignment = 0;
// Allocate space for function arguments.
func->_args = nullptr;
if (func->getNumArgs() != 0) {
func->_args = _zoneAllocator.allocT<VarData*>(func->getNumArgs() * sizeof(VarData*));
if (func->_args == nullptr)
goto _NoMemory;
::memset(func->_args, 0, func->getNumArgs() * sizeof(VarData*));
}
return func;
_NoMemory:
setLastError(kErrorNoHeapMemory);
return nullptr;
}
X86FuncNode* X86Compiler::addFunc(const FuncPrototype& p) noexcept {
X86FuncNode* func = newFunc(p);
if (func == nullptr) {
setLastError(kErrorNoHeapMemory);
return nullptr;
}
return static_cast<X86FuncNode*>(addFunc(func));
}
HLSentinel* X86Compiler::endFunc() noexcept {
X86FuncNode* func = getFunc();
ASMJIT_ASSERT(func != nullptr);
// Add the local constant pool at the end of the function (if it exists).
setCursor(func->getExitNode());
if (_localConstPoolLabel.isInitialized()) {
embedConstPool(_localConstPoolLabel, _localConstPool);
_localConstPoolLabel.reset();
_localConstPool.reset();
}
// Finalize.
func->addFuncFlags(kFuncFlagIsFinished);
_func = nullptr;
setCursor(func->getEnd());
return func->getEnd();
}
// ============================================================================
// [asmjit::X86Compiler - Ret]
// ============================================================================
HLRet* X86Compiler::newRet(const Operand& o0, const Operand& o1) noexcept {
HLRet* node = newNode<HLRet>(o0, o1);
if (node == nullptr)
goto _NoMemory;
return node;
_NoMemory:
setLastError(kErrorNoHeapMemory);
return nullptr;
}
HLRet* X86Compiler::addRet(const Operand& o0, const Operand& o1) noexcept {
HLRet* node = newRet(o0, o1);
if (node == nullptr)
return node;
return static_cast<HLRet*>(addNode(node));
}
// ============================================================================
// [asmjit::X86Compiler - Call]
// ============================================================================
X86CallNode* X86Compiler::newCall(const Operand& o0, const FuncPrototype& p) noexcept {
X86CallNode* node = newNode<X86CallNode>(o0);
Error error;
uint32_t nArgs;
if (node == nullptr)
goto _NoMemory;
if ((error = node->_x86Decl.setPrototype(p)) != kErrorOk) {
setLastError(error);
return nullptr;
}
// If there are no arguments skip the allocation.
if ((nArgs = p.getNumArgs()) == 0)
return node;
node->_args = static_cast<Operand*>(_zoneAllocator.alloc(nArgs * sizeof(Operand)));
if (node->_args == nullptr)
goto _NoMemory;
::memset(node->_args, 0, nArgs * sizeof(Operand));
return node;
_NoMemory:
setLastError(kErrorNoHeapMemory);
return nullptr;
}
X86CallNode* X86Compiler::addCall(const Operand& o0, const FuncPrototype& p) noexcept {
X86CallNode* node = newCall(o0, p);
if (node == nullptr)
return nullptr;
return static_cast<X86CallNode*>(addNode(node));
}
// ============================================================================
// [asmjit::X86Compiler - Vars]
// ============================================================================
Error X86Compiler::setArg(uint32_t argIndex, const Var& var) noexcept {
X86FuncNode* func = getFunc();
if (func == nullptr)
return kErrorInvalidArgument;
if (!isVarValid(var))
return kErrorInvalidState;
VarData* vd = getVd(var);
func->setArg(argIndex, vd);
return kErrorOk;
}
Error X86Compiler::_newVar(Var* var, uint32_t vType, const char* name) noexcept {
ASMJIT_ASSERT(vType < kX86VarTypeCount);
vType = _targetVarMapping[vType];
ASMJIT_ASSERT(vType != kInvalidVar);
// The assertion won't be compiled in a release build; however, we want to
// check this anyway.
if (vType == kInvalidVar) {
static_cast<X86Var*>(var)->reset();
return kErrorInvalidArgument;
}
const VarInfo& vInfo = _x86VarInfo[vType];
VarData* vd = _newVd(vInfo, name);
if (vd == nullptr) {
static_cast<X86Var*>(var)->reset();
return getLastError();
}
var->_init_packed_op_sz_w0_id(Operand::kTypeVar, vInfo.getSize(), vInfo.getRegType() << 8, vd->getId());
var->_vreg.vType = vType;
return kErrorOk;
}
Error X86Compiler::_newVar(Var* var, uint32_t vType, const char* fmt, va_list ap) noexcept {
char name[64];
vsnprintf(name, ASMJIT_ARRAY_SIZE(name), fmt, ap);
name[ASMJIT_ARRAY_SIZE(name) - 1] = '\0';
return _newVar(var, vType, name);
}
// ============================================================================
// [asmjit::X86Compiler - Stack]
// ============================================================================
Error X86Compiler::_newStack(BaseMem* mem, uint32_t size, uint32_t alignment, const char* name) noexcept {
if (size == 0)
return kErrorInvalidArgument;
if (alignment > 64)
alignment = 64;
VarInfo vi = { kInvalidVar, 0, kInvalidReg , kInvalidReg, 0, "" };
VarData* vd = _newVd(vi, name);
if (vd == nullptr) {
static_cast<X86Mem*>(mem)->reset();
return getLastError();
}
vd->_size = size;
vd->_isStack = true;
vd->_alignment = static_cast<uint8_t>(alignment);
static_cast<X86Mem*>(mem)->_init(kMemTypeStackIndex, vd->getId(), 0, 0);
return kErrorOk;
}
// ============================================================================
// [asmjit::X86Compiler - Const]
// ============================================================================
Error X86Compiler::_newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) noexcept {
Error error = kErrorOk;
size_t offset;
Label* dstLabel;
ConstPool* dstPool;
if (scope == kConstScopeLocal) {
dstLabel = &_localConstPoolLabel;
dstPool = &_localConstPool;
}
else if (scope == kConstScopeGlobal) {
dstLabel = &_globalConstPoolLabel;
dstPool = &_globalConstPool;
}
else {
error = kErrorInvalidArgument;
goto _OnError;
}
error = dstPool->add(data, size, offset);
if (error != kErrorOk)
goto _OnError;
if (dstLabel->getId() == kInvalidValue) {
*dstLabel = newLabel();
if (!dstLabel->isInitialized()) {
error = kErrorNoHeapMemory;
goto _OnError;
}
}
*static_cast<X86Mem*>(mem) = x86::ptr(*dstLabel, static_cast<int32_t>(offset), static_cast<uint32_t>(size));
return kErrorOk;
_OnError:
return error;
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER && (ASMJIT_BUILD_X86 || ASMJIT_BUILD_X64)
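For orientation, the pieces above fit together roughly as follows: addFunc() builds an X86FuncNode from a prototype, the emit() overloads append HLInst/HLJump nodes, endFunc() closes the function, and finalize() walks the node list, runs X86Context over each function and serializes the result into the attached assembler. The sketch below only illustrates that flow; the runtime/assembler setup and helper names (JitRuntime, FuncBuilder2, kCallConvHost, newInt32, make) are assumptions based on AsmJit's public API of this era, not part of this commit.
// Illustrative sketch only -- the setup and helper names used here are
// assumed from AsmJit's public API of this era, not taken from this commit.
#include <asmjit/asmjit.h>
using namespace asmjit;
int main() {
  JitRuntime runtime;            // owns the executable memory
  X86Assembler a(&runtime);      // low-level emitter
  X86Compiler c(&a);             // attaches to the assembler (see attach())
  c.addFunc(FuncBuilder2<int, int, int>(kCallConvHost)); // newFunc()/addFunc()
  X86GpVar x = c.newInt32("x");
  X86GpVar y = c.newInt32("y");
  c.setArg(0, x);                // see X86Compiler::setArg()
  c.setArg(1, y);
  c.add(x, y);                   // expands to newInst()/emit()
  c.ret(x);                      // newRet()/addRet()
  c.endFunc();                   // closes the X86FuncNode
  c.finalize();                  // register allocation + serialization
  typedef int (*AddFunc)(int, int);
  AddFunc fn = reinterpret_cast<AddFunc>(a.make());
  int result = fn(2, 3);         // result == 5
  runtime.release((void*)fn);
  return result == 5 ? 0 : 1;
}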

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,726 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86COMPILERCONTEXT_P_H
#define _ASMJIT_X86_X86COMPILERCONTEXT_P_H
#include "../build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/compiler.h"
#include "../base/compilercontext_p.h"
#include "../base/utils.h"
#include "../x86/x86assembler.h"
#include "../x86/x86compiler.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_x86
//! \{
// ============================================================================
// [asmjit::X86VarMap]
// ============================================================================
struct X86VarMap : public VarMap {
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get variable-attributes list as VarAttr data.
ASMJIT_INLINE VarAttr* getVaList() const {
return const_cast<VarAttr*>(_list);
}
//! Get variable-attributes list as VarAttr data (by class).
ASMJIT_INLINE VarAttr* getVaListByClass(uint32_t rc) const {
return const_cast<VarAttr*>(_list) + _start.get(rc);
}
//! Get position of variables (by class).
ASMJIT_INLINE uint32_t getVaStart(uint32_t rc) const {
return _start.get(rc);
}
//! Get count of variables (by class).
ASMJIT_INLINE uint32_t getVaCountByClass(uint32_t rc) const {
return _count.get(rc);
}
//! Get VarAttr at `index`.
ASMJIT_INLINE VarAttr* getVa(uint32_t index) const {
ASMJIT_ASSERT(index < _vaCount);
return getVaList() + index;
}
//! Get VarAttr of `c` class at `index`.
ASMJIT_INLINE VarAttr* getVaByClass(uint32_t rc, uint32_t index) const {
ASMJIT_ASSERT(index < _count._regs[rc]);
return getVaListByClass(rc) + index;
}
// --------------------------------------------------------------------------
// [Utils]
// --------------------------------------------------------------------------
//! Find VarAttr.
ASMJIT_INLINE VarAttr* findVa(VarData* vd) const {
VarAttr* list = getVaList();
uint32_t count = getVaCount();
for (uint32_t i = 0; i < count; i++)
if (list[i].getVd() == vd)
return &list[i];
return nullptr;
}
//! Find VarAttr (by class).
ASMJIT_INLINE VarAttr* findVaByClass(uint32_t rc, VarData* vd) const {
VarAttr* list = getVaListByClass(rc);
uint32_t count = getVaCountByClass(rc);
for (uint32_t i = 0; i < count; i++)
if (list[i].getVd() == vd)
return &list[i];
return nullptr;
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Special registers on input.
//!
//! Special register(s) restricted to one or more physical registers. If there
//! is more than one special register it means that we have to duplicate the
//! variable content to all of them (i.e. the same variable was used by two or
//! more operands). We forget about duplicates after the register allocation
//! finishes and mark all duplicates as non-assigned.
X86RegMask _inRegs;
//! Special registers on output.
//!
//! Special register(s) used on output. Each variable can have only one
//! special register on the output, 'X86VarMap' contains all registers from
//! all 'VarAttr's.
X86RegMask _outRegs;
//! Clobbered registers (by a function call).
X86RegMask _clobberedRegs;
//! Start indexes of variables per register class.
X86RegCount _start;
//! Count of variables per register class.
X86RegCount _count;
//! VarAttr list.
VarAttr _list[1];
};
// ============================================================================
// [asmjit::X86StateCell]
// ============================================================================
//! X86/X64 state-cell.
union X86StateCell {
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
ASMJIT_INLINE uint32_t getState() const {
return _state;
}
ASMJIT_INLINE void setState(uint32_t state) {
_state = static_cast<uint8_t>(state);
}
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
ASMJIT_INLINE void reset() { _packed = 0; }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
uint8_t _packed;
struct {
uint8_t _state : 2;
uint8_t _unused : 6;
};
};
// ============================================================================
// [asmjit::X86VarState]
// ============================================================================
//! X86/X64 state.
struct X86VarState : VarState {
enum {
//! Base index of GP registers.
kGpIndex = 0,
//! Count of GP registers.
kGpCount = 16,
//! Base index of MMX registers.
kMmIndex = kGpIndex + kGpCount,
//! Count of Mm registers.
kMmCount = 8,
//! Base index of XMM registers.
kXmmIndex = kMmIndex + kMmCount,
//! Count of XMM registers.
kXmmCount = 16,
//! Count of all registers in `X86VarState`.
kAllCount = kXmmIndex + kXmmCount
};
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
ASMJIT_INLINE VarData** getList() {
return _list;
}
ASMJIT_INLINE VarData** getListByClass(uint32_t rc) {
switch (rc) {
case kX86RegClassGp : return _listGp;
case kX86RegClassMm : return _listMm;
case kX86RegClassXyz: return _listXmm;
default:
return nullptr;
}
}
// --------------------------------------------------------------------------
// [Clear]
// --------------------------------------------------------------------------
ASMJIT_INLINE void reset(size_t numCells) {
::memset(this, 0, kAllCount * sizeof(VarData*) +
2 * sizeof(X86RegMask) +
numCells * sizeof(X86StateCell));
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
union {
//! List of all allocated variables in one array.
VarData* _list[kAllCount];
struct {
//! Allocated GP registers.
VarData* _listGp[kGpCount];
//! Allocated MMX registers.
VarData* _listMm[kMmCount];
//! Allocated XMM registers.
VarData* _listXmm[kXmmCount];
};
};
//! Occupied registers (mask).
X86RegMask _occupied;
//! Modified registers (mask).
X86RegMask _modified;
//! Variables data, the length is stored in `X86Context`.
X86StateCell _cells[1];
};
// ============================================================================
// [asmjit::X86Context]
// ============================================================================
#if defined(ASMJIT_DEBUG)
# define ASMJIT_X86_CHECK_STATE _checkState();
#else
# define ASMJIT_X86_CHECK_STATE
#endif // ASMJIT_DEBUG
//! \internal
//!
//! Compiler context, used by `X86Compiler`.
//!
//! The compiler context takes care of generating the function prolog and
//! epilog, and also performs register allocation. It's used during the
//! compilation phase, is considered an implementation detail, and asmjit
//! consumers don't have access to it. The context is used once per function
//! and is reset after the function is processed.
struct X86Context : public Context {
ASMJIT_NO_COPY(X86Context)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new `X86Context` instance.
X86Context(X86Compiler* compiler);
//! Destroy the `X86Context` instance.
virtual ~X86Context();
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
virtual void reset(bool releaseMemory = false) override;
// --------------------------------------------------------------------------
// [Arch]
// --------------------------------------------------------------------------
ASMJIT_INLINE bool isX64() const { return _zsp.getSize() == 16; }
ASMJIT_INLINE uint32_t getRegSize() const { return _zsp.getSize(); }
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get compiler as `X86Compiler`.
ASMJIT_INLINE X86Compiler* getCompiler() const { return static_cast<X86Compiler*>(_compiler); }
//! Get function as `X86FuncNode`.
ASMJIT_INLINE X86FuncNode* getFunc() const { return reinterpret_cast<X86FuncNode*>(_func); }
//! Get clobbered registers (global).
ASMJIT_INLINE uint32_t getClobberedRegs(uint32_t rc) { return _clobberedRegs.get(rc); }
// --------------------------------------------------------------------------
// [Helpers]
// --------------------------------------------------------------------------
ASMJIT_INLINE X86VarMap* newVarMap(uint32_t vaCount) {
return static_cast<X86VarMap*>(
_zoneAllocator.alloc(sizeof(X86VarMap) + vaCount * sizeof(VarAttr)));
}
// --------------------------------------------------------------------------
// [Emit]
// --------------------------------------------------------------------------
void emitLoad(VarData* vd, uint32_t regIndex, const char* reason);
void emitSave(VarData* vd, uint32_t regIndex, const char* reason);
void emitMove(VarData* vd, uint32_t toRegIndex, uint32_t fromRegIndex, const char* reason);
void emitSwapGp(VarData* aVd, VarData* bVd, uint32_t aIndex, uint32_t bIndex, const char* reason);
void emitPushSequence(uint32_t regs);
void emitPopSequence(uint32_t regs);
void emitConvertVarToVar(uint32_t dstType, uint32_t dstIndex, uint32_t srcType, uint32_t srcIndex);
void emitMoveVarOnStack(uint32_t dstType, const X86Mem* dst, uint32_t srcType, uint32_t srcIndex);
void emitMoveImmOnStack(uint32_t dstType, const X86Mem* dst, const Imm* src);
void emitMoveImmToReg(uint32_t dstType, uint32_t dstIndex, const Imm* src);
// --------------------------------------------------------------------------
// [Register Management]
// --------------------------------------------------------------------------
void _checkState();
// --------------------------------------------------------------------------
// [Attach / Detach]
// --------------------------------------------------------------------------
//! Attach.
//!
//! Attach a register to the 'VarData', changing 'VarData' members to show
//! that the variable is currently alive, and linking the variable with the
//! current 'X86VarState'.
template<int C>
ASMJIT_INLINE void attach(VarData* vd, uint32_t regIndex, bool modified) {
ASMJIT_ASSERT(vd->getClass() == C);
ASMJIT_ASSERT(regIndex != kInvalidReg);
// Prevent Esp allocation if C==Gp.
ASMJIT_ASSERT(C != kX86RegClassGp || regIndex != kX86RegIndexSp);
uint32_t regMask = Utils::mask(regIndex);
vd->setState(kVarStateReg);
vd->setModified(modified);
vd->setRegIndex(regIndex);
vd->addHomeIndex(regIndex);
_x86State.getListByClass(C)[regIndex] = vd;
_x86State._occupied.or_(C, regMask);
_x86State._modified.or_(C, static_cast<uint32_t>(modified) << regIndex);
ASMJIT_X86_CHECK_STATE
}
//! Detach.
//!
//! The opposite of 'Attach'. Detach resets the members in 'VarData'
//! (regIndex, state and changed flags) and unlinks the variable from the
//! current 'X86VarState'.
template<int C>
ASMJIT_INLINE void detach(VarData* vd, uint32_t regIndex, uint32_t vState) {
ASMJIT_ASSERT(vd->getClass() == C);
ASMJIT_ASSERT(vd->getRegIndex() == regIndex);
ASMJIT_ASSERT(vState != kVarStateReg);
uint32_t regMask = Utils::mask(regIndex);
vd->setState(vState);
vd->resetRegIndex();
vd->setModified(false);
_x86State.getListByClass(C)[regIndex] = nullptr;
_x86State._occupied.andNot(C, regMask);
_x86State._modified.andNot(C, regMask);
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [Rebase]
// --------------------------------------------------------------------------
//! Rebase.
//!
//! Change the register of the 'VarData', also updating the current 'X86VarState'.
//! Rebase is nearly identical to a 'Detach' and 'Attach' sequence, but doesn't
//! change the `VarData`'s modified flag.
template<int C>
ASMJIT_INLINE void rebase(VarData* vd, uint32_t newRegIndex, uint32_t oldRegIndex) {
ASMJIT_ASSERT(vd->getClass() == C);
uint32_t newRegMask = Utils::mask(newRegIndex);
uint32_t oldRegMask = Utils::mask(oldRegIndex);
uint32_t bothRegMask = newRegMask ^ oldRegMask;
vd->setRegIndex(newRegIndex);
_x86State.getListByClass(C)[oldRegIndex] = nullptr;
_x86State.getListByClass(C)[newRegIndex] = vd;
_x86State._occupied.xor_(C, bothRegMask);
_x86State._modified.xor_(C, bothRegMask & -static_cast<int32_t>(vd->isModified()));
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [Load / Save]
// --------------------------------------------------------------------------
//! Load.
//!
//! Load a variable from its memory slot into a register, emitting a 'Load'
//! instruction and changing the variable state to allocated.
template<int C>
ASMJIT_INLINE void load(VarData* vd, uint32_t regIndex) {
// Can only be called if the variable is not allocated.
ASMJIT_ASSERT(vd->getClass() == C);
ASMJIT_ASSERT(vd->getState() != kVarStateReg);
ASMJIT_ASSERT(vd->getRegIndex() == kInvalidReg);
emitLoad(vd, regIndex, "Load");
attach<C>(vd, regIndex, false);
ASMJIT_X86_CHECK_STATE
}
//! Save.
//!
//! Save the variable into its home location, but keep it as allocated.
template<int C>
ASMJIT_INLINE void save(VarData* vd) {
ASMJIT_ASSERT(vd->getClass() == C);
ASMJIT_ASSERT(vd->getState() == kVarStateReg);
ASMJIT_ASSERT(vd->getRegIndex() != kInvalidReg);
uint32_t regIndex = vd->getRegIndex();
uint32_t regMask = Utils::mask(regIndex);
emitSave(vd, regIndex, "Save");
vd->setModified(false);
_x86State._modified.andNot(C, regMask);
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [Move / Swap]
// --------------------------------------------------------------------------
//! Move a register.
//!
//! Move a register from one index to another, emitting 'Move' if needed. This
//! function does nothing if the register is already at the given index.
template<int C>
ASMJIT_INLINE void move(VarData* vd, uint32_t regIndex) {
ASMJIT_ASSERT(vd->getClass() == C);
ASMJIT_ASSERT(vd->getState() == kVarStateReg);
ASMJIT_ASSERT(vd->getRegIndex() != kInvalidReg);
uint32_t oldIndex = vd->getRegIndex();
if (regIndex != oldIndex) {
emitMove(vd, regIndex, oldIndex, "Move");
rebase<C>(vd, regIndex, oldIndex);
}
ASMJIT_X86_CHECK_STATE
}
//! Swap two registers
//!
//! It's only possible to swap Gp registers.
ASMJIT_INLINE void swapGp(VarData* aVd, VarData* bVd) {
ASMJIT_ASSERT(aVd != bVd);
ASMJIT_ASSERT(aVd->getClass() == kX86RegClassGp);
ASMJIT_ASSERT(aVd->getState() == kVarStateReg);
ASMJIT_ASSERT(aVd->getRegIndex() != kInvalidReg);
ASMJIT_ASSERT(bVd->getClass() == kX86RegClassGp);
ASMJIT_ASSERT(bVd->getState() == kVarStateReg);
ASMJIT_ASSERT(bVd->getRegIndex() != kInvalidReg);
uint32_t aIndex = aVd->getRegIndex();
uint32_t bIndex = bVd->getRegIndex();
emitSwapGp(aVd, bVd, aIndex, bIndex, "Swap");
aVd->setRegIndex(bIndex);
bVd->setRegIndex(aIndex);
_x86State.getListByClass(kX86RegClassGp)[aIndex] = bVd;
_x86State.getListByClass(kX86RegClassGp)[bIndex] = aVd;
uint32_t m = aVd->isModified() ^ bVd->isModified();
_x86State._modified.xor_(kX86RegClassGp, (m << aIndex) | (m << bIndex));
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [Alloc / Spill]
// --------------------------------------------------------------------------
//! Alloc.
template<int C>
ASMJIT_INLINE void alloc(VarData* vd, uint32_t regIndex) {
ASMJIT_ASSERT(vd->getClass() == C);
ASMJIT_ASSERT(regIndex != kInvalidReg);
uint32_t oldRegIndex = vd->getRegIndex();
uint32_t oldState = vd->getState();
uint32_t regMask = Utils::mask(regIndex);
ASMJIT_ASSERT(_x86State.getListByClass(C)[regIndex] == nullptr || regIndex == oldRegIndex);
if (oldState != kVarStateReg) {
if (oldState == kVarStateMem)
emitLoad(vd, regIndex, "Alloc");
vd->setModified(false);
}
else if (oldRegIndex != regIndex) {
emitMove(vd, regIndex, oldRegIndex, "Alloc");
_x86State.getListByClass(C)[oldRegIndex] = nullptr;
regMask ^= Utils::mask(oldRegIndex);
}
else {
ASMJIT_X86_CHECK_STATE
return;
}
vd->setState(kVarStateReg);
vd->setRegIndex(regIndex);
vd->addHomeIndex(regIndex);
_x86State.getListByClass(C)[regIndex] = vd;
_x86State._occupied.xor_(C, regMask);
_x86State._modified.xor_(C, regMask & -static_cast<int32_t>(vd->isModified()));
ASMJIT_X86_CHECK_STATE
}
//! Spill.
//!
//! Spill a variable/register, saving its content to the memory home if modified.
template<int C>
ASMJIT_INLINE void spill(VarData* vd) {
ASMJIT_ASSERT(vd->getClass() == C);
if (vd->getState() != kVarStateReg) {
ASMJIT_X86_CHECK_STATE
return;
}
uint32_t regIndex = vd->getRegIndex();
ASMJIT_ASSERT(regIndex != kInvalidReg);
ASMJIT_ASSERT(_x86State.getListByClass(C)[regIndex] == vd);
if (vd->isModified())
emitSave(vd, regIndex, "Spill");
detach<C>(vd, regIndex, kVarStateMem);
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [Modify]
// --------------------------------------------------------------------------
template<int C>
ASMJIT_INLINE void modify(VarData* vd) {
ASMJIT_ASSERT(vd->getClass() == C);
uint32_t regIndex = vd->getRegIndex();
uint32_t regMask = Utils::mask(regIndex);
vd->setModified(true);
_x86State._modified.or_(C, regMask);
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [Unuse]
// --------------------------------------------------------------------------
//! Unuse.
//!
//! Unuse a variable: it will be detached if it's allocated, and its state
//! will be changed to kVarStateNone (or the given vState).
template<int C>
ASMJIT_INLINE void unuse(VarData* vd, uint32_t vState = kVarStateNone) {
ASMJIT_ASSERT(vd->getClass() == C);
ASMJIT_ASSERT(vState != kVarStateReg);
uint32_t regIndex = vd->getRegIndex();
if (regIndex != kInvalidReg)
detach<C>(vd, regIndex, vState);
else
vd->setState(vState);
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [State]
// --------------------------------------------------------------------------
//! Get state as `X86VarState`.
ASMJIT_INLINE X86VarState* getState() const {
return const_cast<X86VarState*>(&_x86State);
}
virtual void loadState(VarState* src);
virtual VarState* saveState();
virtual void switchState(VarState* src);
virtual void intersectStates(VarState* a, VarState* b);
// --------------------------------------------------------------------------
// [Memory]
// --------------------------------------------------------------------------
ASMJIT_INLINE X86Mem getVarMem(VarData* vd) {
(void)getVarCell(vd);
X86Mem mem(_memSlot);
mem.setBase(vd->getId());
return mem;
}
// --------------------------------------------------------------------------
// [Fetch]
// --------------------------------------------------------------------------
virtual Error fetch();
// --------------------------------------------------------------------------
// [Annotate]
// --------------------------------------------------------------------------
virtual Error annotate();
// --------------------------------------------------------------------------
// [Translate]
// --------------------------------------------------------------------------
virtual Error translate();
// --------------------------------------------------------------------------
// [Serialize]
// --------------------------------------------------------------------------
virtual Error serialize(Assembler* assembler, HLNode* start, HLNode* stop);
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Count of X86/X64 registers.
X86RegCount _regCount;
//! X86/X64 stack-pointer (esp or rsp).
X86GpReg _zsp;
//! X86/X64 frame-pointer (ebp or rbp).
X86GpReg _zbp;
//! Temporary memory operand.
X86Mem _memSlot;
//! X86/X64 specific compiler state, linked to `_state`.
X86VarState _x86State;
//! Clobbered registers (for the whole function).
X86RegMask _clobberedRegs;
//! Memory cell storing the address used to restore a manually
//! aligned stack.
VarCell* _stackFrameCell;
//! Global allocable registers mask.
uint32_t _gaRegs[kX86RegClassCount];
//! Function arguments base pointer (register).
uint8_t _argBaseReg;
//! Function variables base pointer (register).
uint8_t _varBaseReg;
//! Whether to emit comments.
uint8_t _emitComments;
//! Function arguments base offset.
int32_t _argBaseOffset;
//! Function variables base offset.
int32_t _varBaseOffset;
//! Function arguments displacement.
int32_t _argActualDisp;
//! Function variables displacement.
int32_t _varActualDisp;
//! Temporary string builder used for logging.
StringBuilderTmp<256> _stringBuilder;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER
#endif // _ASMJIT_X86_X86COMPILERCONTEXT_P_H
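The attach/detach/rebase/alloc helpers above all boil down to maintaining two per-class bit masks, occupied and modified, keyed by register index. The standalone sketch below models just that bookkeeping pattern with plain integers; the types and names are illustrative only and are not the library's.
// Standalone model of the occupied/modified mask bookkeeping performed by
// X86Context::attach()/detach(); types and names here are illustrative only.
#include <cassert>
#include <cstdint>
struct RegState {
  uint32_t occupied = 0;  // bit i set => register i holds a live variable
  uint32_t modified = 0;  // bit i set => register i is dirty (needs a spill)
  void attach(uint32_t regIndex, bool dirty) {
    uint32_t mask = 1u << regIndex;
    assert((occupied & mask) == 0);      // the register must be free
    occupied |= mask;
    if (dirty) modified |= mask;
  }
  void detach(uint32_t regIndex) {
    uint32_t mask = 1u << regIndex;
    occupied &= ~mask;
    modified &= ~mask;                   // dirtiness is forgotten as well
  }
  bool needsSpill(uint32_t regIndex) const {
    return (modified >> regIndex) & 1u;  // only dirty registers get saved
  }
};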

View File

@ -0,0 +1,551 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../build.h"
#if !defined(ASMJIT_DISABLE_COMPILER) && (defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64))
// [Dependencies]
#include "../x86/x86compiler.h"
#include "../x86/x86compilerfunc.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::X86FuncDecl - Helpers]
// ============================================================================
static ASMJIT_INLINE bool x86ArgIsInt(uint32_t aType) {
ASMJIT_ASSERT(aType < kX86VarTypeCount);
return Utils::inInterval<uint32_t>(aType, _kVarTypeIntStart, _kVarTypeIntEnd);
}
static ASMJIT_INLINE bool x86ArgIsFp(uint32_t aType) {
ASMJIT_ASSERT(aType < kX86VarTypeCount);
return Utils::inInterval<uint32_t>(aType, _kVarTypeFpStart, _kVarTypeFpEnd);
}
static ASMJIT_INLINE uint32_t x86ArgTypeToXmmType(uint32_t aType) {
if (aType == kVarTypeFp32) return kX86VarTypeXmmSs;
if (aType == kVarTypeFp64) return kX86VarTypeXmmSd;
return aType;
}
//! Get an architecture depending on the calling convention `callConv`.
//!
//! Returns `kArchNone`, `kArchX86`, or `kArchX64`.
static ASMJIT_INLINE uint32_t x86GetArchFromCConv(uint32_t callConv) {
if (Utils::inInterval<uint32_t>(callConv, _kCallConvX86Start, _kCallConvX86End)) return kArchX86;
if (Utils::inInterval<uint32_t>(callConv, _kCallConvX64Start, _kCallConvX64End)) return kArchX64;
return kArchNone;
}
// ============================================================================
// [asmjit::X86FuncDecl - SetPrototype]
// ============================================================================
#define R(_Index_) kX86RegIndex##_Index_
static uint32_t X86FuncDecl_initConv(X86FuncDecl* self, uint32_t arch, uint32_t callConv) {
// Setup defaults.
self->_argStackSize = 0;
self->_redZoneSize = 0;
self->_spillZoneSize = 0;
self->_callConv = static_cast<uint8_t>(callConv);
self->_calleePopsStack = false;
self->_argsDirection = kFuncDirRTL;
self->_passed.reset();
self->_preserved.reset();
::memset(self->_passedOrderGp, kInvalidReg, ASMJIT_ARRAY_SIZE(self->_passedOrderGp));
::memset(self->_passedOrderXyz, kInvalidReg, ASMJIT_ARRAY_SIZE(self->_passedOrderXyz));
switch (arch) {
// ------------------------------------------------------------------------
// [X86 Support]
// ------------------------------------------------------------------------
#if defined(ASMJIT_BUILD_X86)
case kArchX86: {
self->_preserved.set(kX86RegClassGp, Utils::mask(R(Bx), R(Sp), R(Bp), R(Si), R(Di)));
switch (callConv) {
case kCallConvX86CDecl:
break;
case kCallConvX86StdCall:
self->_calleePopsStack = true;
break;
case kCallConvX86MsThisCall:
self->_calleePopsStack = true;
self->_passed.set(kX86RegClassGp, Utils::mask(R(Cx)));
self->_passedOrderGp[0] = R(Cx);
break;
case kCallConvX86MsFastCall:
self->_calleePopsStack = true;
self->_passed.set(kX86RegClassGp, Utils::mask(R(Cx), R(Dx)));
self->_passedOrderGp[0] = R(Cx);
self->_passedOrderGp[1] = R(Dx);
break;
case kCallConvX86BorlandFastCall:
self->_calleePopsStack = true;
self->_argsDirection = kFuncDirLTR;
self->_passed.set(kX86RegClassGp, Utils::mask(R(Ax), R(Dx), R(Cx)));
self->_passedOrderGp[0] = R(Ax);
self->_passedOrderGp[1] = R(Dx);
self->_passedOrderGp[2] = R(Cx);
break;
case kCallConvX86GccFastCall:
self->_calleePopsStack = true;
self->_passed.set(kX86RegClassGp, Utils::mask(R(Cx), R(Dx)));
self->_passedOrderGp[0] = R(Cx);
self->_passedOrderGp[1] = R(Dx);
break;
case kCallConvX86GccRegParm1:
self->_passed.set(kX86RegClassGp, Utils::mask(R(Ax)));
self->_passedOrderGp[0] = R(Ax);
break;
case kCallConvX86GccRegParm2:
self->_passed.set(kX86RegClassGp, Utils::mask(R(Ax), R(Dx)));
self->_passedOrderGp[0] = R(Ax);
self->_passedOrderGp[1] = R(Dx);
break;
case kCallConvX86GccRegParm3:
self->_passed.set(kX86RegClassGp, Utils::mask(R(Ax), R(Dx), R(Cx)));
self->_passedOrderGp[0] = R(Ax);
self->_passedOrderGp[1] = R(Dx);
self->_passedOrderGp[2] = R(Cx);
break;
default:
return kErrorInvalidArgument;
}
return kErrorOk;
}
#endif // ASMJIT_BUILD_X86
// ------------------------------------------------------------------------
// [X64 Support]
// ------------------------------------------------------------------------
#if defined(ASMJIT_BUILD_X64)
case kArchX64: {
switch (callConv) {
case kCallConvX64Win:
self->_spillZoneSize = 32;
self->_passed.set(kX86RegClassGp, Utils::mask(R(Cx), R(Dx), 8, 9));
self->_passedOrderGp[0] = R(Cx);
self->_passedOrderGp[1] = R(Dx);
self->_passedOrderGp[2] = 8;
self->_passedOrderGp[3] = 9;
self->_passed.set(kX86RegClassXyz, Utils::mask(0, 1, 2, 3));
self->_passedOrderXyz[0] = 0;
self->_passedOrderXyz[1] = 1;
self->_passedOrderXyz[2] = 2;
self->_passedOrderXyz[3] = 3;
self->_preserved.set(kX86RegClassGp , Utils::mask(R(Bx), R(Sp), R(Bp), R(Si), R(Di), 12, 13, 14, 15));
self->_preserved.set(kX86RegClassXyz, Utils::mask(6, 7, 8, 9, 10, 11, 12, 13, 14, 15));
break;
case kCallConvX64Unix:
self->_redZoneSize = 128;
self->_passed.set(kX86RegClassGp, Utils::mask(R(Di), R(Si), R(Dx), R(Cx), 8, 9));
self->_passedOrderGp[0] = R(Di);
self->_passedOrderGp[1] = R(Si);
self->_passedOrderGp[2] = R(Dx);
self->_passedOrderGp[3] = R(Cx);
self->_passedOrderGp[4] = 8;
self->_passedOrderGp[5] = 9;
self->_passed.set(kX86RegClassXyz, Utils::mask(0, 1, 2, 3, 4, 5, 6, 7));
self->_passedOrderXyz[0] = 0;
self->_passedOrderXyz[1] = 1;
self->_passedOrderXyz[2] = 2;
self->_passedOrderXyz[3] = 3;
self->_passedOrderXyz[4] = 4;
self->_passedOrderXyz[5] = 5;
self->_passedOrderXyz[6] = 6;
self->_passedOrderXyz[7] = 7;
self->_preserved.set(kX86RegClassGp, Utils::mask(R(Bx), R(Sp), R(Bp), 12, 13, 14, 15));
break;
default:
return kErrorInvalidArgument;
}
return kErrorOk;
}
#endif // ASMJIT_BUILD_X64
default:
return kErrorInvalidArgument;
}
}
#undef R
static Error X86FuncDecl_initFunc(X86FuncDecl* self, uint32_t arch,
uint32_t ret, const uint32_t* args, uint32_t numArgs) {
ASMJIT_ASSERT(numArgs <= kFuncArgCount);
uint32_t callConv = self->_callConv;
uint32_t regSize = (arch == kArchX86) ? 4 : 8;
int32_t i = 0;
int32_t gpPos = 0;
int32_t xmmPos = 0;
int32_t stackOffset = 0;
const uint8_t* varMapping = nullptr;
#if defined(ASMJIT_BUILD_X86)
if (arch == kArchX86)
varMapping = _x86VarMapping;
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_X64)
if (arch == kArchX64)
varMapping = _x64VarMapping;
#endif // ASMJIT_BUILD_X64
ASMJIT_ASSERT(varMapping != nullptr);
self->_numArgs = static_cast<uint8_t>(numArgs);
self->_retCount = 0;
for (i = 0; i < static_cast<int32_t>(numArgs); i++) {
FuncInOut& arg = self->getArg(i);
arg._varType = static_cast<uint8_t>(varMapping[args[i]]);
arg._regIndex = kInvalidReg;
arg._stackOffset = kFuncStackInvalid;
}
for (; i < kFuncArgCount; i++) {
self->_args[i].reset();
}
self->_rets[0].reset();
self->_rets[1].reset();
self->_argStackSize = 0;
self->_used.reset();
if (ret != kInvalidVar) {
ret = varMapping[ret];
switch (ret) {
case kVarTypeInt64:
case kVarTypeUInt64:
// 64-bit value is returned in EDX:EAX on x86.
#if defined(ASMJIT_BUILD_X86)
if (arch == kArchX86) {
self->_retCount = 2;
self->_rets[0]._varType = kVarTypeUInt32;
self->_rets[0]._regIndex = kX86RegIndexAx;
self->_rets[1]._varType = static_cast<uint8_t>(ret - 2);
self->_rets[1]._regIndex = kX86RegIndexDx;
}
ASMJIT_FALLTHROUGH;
#endif // ASMJIT_BUILD_X86
case kVarTypeInt8:
case kVarTypeUInt8:
case kVarTypeInt16:
case kVarTypeUInt16:
case kVarTypeInt32:
case kVarTypeUInt32:
self->_retCount = 1;
self->_rets[0]._varType = static_cast<uint8_t>(ret);
self->_rets[0]._regIndex = kX86RegIndexAx;
break;
case kX86VarTypeMm:
self->_retCount = 1;
self->_rets[0]._varType = static_cast<uint8_t>(ret);
self->_rets[0]._regIndex = 0;
break;
case kVarTypeFp32:
self->_retCount = 1;
if (arch == kArchX86) {
self->_rets[0]._varType = kVarTypeFp32;
self->_rets[0]._regIndex = 0;
}
else {
self->_rets[0]._varType = kX86VarTypeXmmSs;
self->_rets[0]._regIndex = 0;
}
break;
case kVarTypeFp64:
self->_retCount = 1;
if (arch == kArchX86) {
self->_rets[0]._varType = kVarTypeFp64;
self->_rets[0]._regIndex = 0;
}
else {
self->_rets[0]._varType = kX86VarTypeXmmSd;
self->_rets[0]._regIndex = 0;
break;
}
break;
case kX86VarTypeXmm:
case kX86VarTypeXmmSs:
case kX86VarTypeXmmSd:
case kX86VarTypeXmmPs:
case kX86VarTypeXmmPd:
self->_retCount = 1;
self->_rets[0]._varType = static_cast<uint8_t>(ret);
self->_rets[0]._regIndex = 0;
break;
}
}
if (self->_numArgs == 0)
return kErrorOk;
#if defined(ASMJIT_BUILD_X86)
if (arch == kArchX86) {
// Register arguments (Integer), always left-to-right.
for (i = 0; i != static_cast<int32_t>(numArgs); i++) {
FuncInOut& arg = self->getArg(i);
uint32_t varType = varMapping[arg.getVarType()];
if (!x86ArgIsInt(varType) || gpPos >= ASMJIT_ARRAY_SIZE(self->_passedOrderGp))
continue;
if (self->_passedOrderGp[gpPos] == kInvalidReg)
continue;
arg._regIndex = self->_passedOrderGp[gpPos++];
self->_used.or_(kX86RegClassGp, Utils::mask(arg.getRegIndex()));
}
// Stack arguments.
int32_t iStart = static_cast<int32_t>(numArgs - 1);
int32_t iEnd = -1;
int32_t iStep = -1;
if (self->_argsDirection == kFuncDirLTR) {
iStart = 0;
iEnd = static_cast<int32_t>(numArgs);
iStep = 1;
}
for (i = iStart; i != iEnd; i += iStep) {
FuncInOut& arg = self->getArg(i);
uint32_t varType = varMapping[arg.getVarType()];
if (arg.hasRegIndex())
continue;
if (x86ArgIsInt(varType)) {
stackOffset -= 4;
arg._stackOffset = static_cast<int16_t>(stackOffset);
}
else if (x86ArgIsFp(varType)) {
int32_t size = static_cast<int32_t>(_x86VarInfo[varType].getSize());
stackOffset -= size;
arg._stackOffset = static_cast<int16_t>(stackOffset);
}
}
}
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_X64)
if (arch == kArchX64) {
if (callConv == kCallConvX64Win) {
int32_t argMax = Utils::iMin<int32_t>(numArgs, 4);
// Register arguments (GP/XMM), always left-to-right.
for (i = 0; i != argMax; i++) {
FuncInOut& arg = self->getArg(i);
uint32_t varType = varMapping[arg.getVarType()];
if (x86ArgIsInt(varType) && i < ASMJIT_ARRAY_SIZE(self->_passedOrderGp)) {
arg._regIndex = self->_passedOrderGp[i];
self->_used.or_(kX86RegClassGp, Utils::mask(arg.getRegIndex()));
continue;
}
if (x86ArgIsFp(varType) && i < ASMJIT_ARRAY_SIZE(self->_passedOrderXyz)) {
arg._varType = static_cast<uint8_t>(x86ArgTypeToXmmType(varType));
arg._regIndex = self->_passedOrderXyz[i];
self->_used.or_(kX86RegClassXyz, Utils::mask(arg.getRegIndex()));
}
}
// Stack arguments (always right-to-left).
for (i = numArgs - 1; i != -1; i--) {
FuncInOut& arg = self->getArg(i);
uint32_t varType = varMapping[arg.getVarType()];
if (arg.hasRegIndex())
continue;
if (x86ArgIsInt(varType)) {
stackOffset -= 8; // Always 8 bytes.
arg._stackOffset = stackOffset;
}
else if (x86ArgIsFp(varType)) {
stackOffset -= 8; // Always 8 bytes (float/double).
arg._stackOffset = stackOffset;
}
}
// 32 bytes shadow space (X64W calling convention specific).
stackOffset -= 4 * 8;
}
else {
// Register arguments (Gp), always left-to-right.
for (i = 0; i != static_cast<int32_t>(numArgs); i++) {
FuncInOut& arg = self->getArg(i);
uint32_t varType = varMapping[arg.getVarType()];
if (!x86ArgIsInt(varType) || gpPos >= ASMJIT_ARRAY_SIZE(self->_passedOrderGp))
continue;
if (self->_passedOrderGp[gpPos] == kInvalidReg)
continue;
arg._regIndex = self->_passedOrderGp[gpPos++];
self->_used.or_(kX86RegClassGp, Utils::mask(arg.getRegIndex()));
}
// Register arguments (XMM), always left-to-right.
for (i = 0; i != static_cast<int32_t>(numArgs); i++) {
FuncInOut& arg = self->getArg(i);
uint32_t varType = varMapping[arg.getVarType()];
if (x86ArgIsFp(varType)) {
arg._varType = static_cast<uint8_t>(x86ArgTypeToXmmType(varType));
arg._regIndex = self->_passedOrderXyz[xmmPos++];
self->_used.or_(kX86RegClassXyz, Utils::mask(arg.getRegIndex()));
}
}
// Stack arguments.
for (i = numArgs - 1; i != -1; i--) {
FuncInOut& arg = self->getArg(i);
uint32_t varType = varMapping[arg.getVarType()];
if (arg.hasRegIndex())
continue;
if (x86ArgIsInt(varType)) {
stackOffset -= 8;
arg._stackOffset = static_cast<int16_t>(stackOffset);
}
else if (x86ArgIsFp(varType)) {
int32_t size = static_cast<int32_t>(_x86VarInfo[varType].getSize());
stackOffset -= size;
arg._stackOffset = static_cast<int16_t>(stackOffset);
}
}
}
}
#endif // ASMJIT_BUILD_X64
// Adjust the stack offsets so that, as a result, all parameters end up with a
// positive non-zero stack offset.
for (i = 0; i < static_cast<int32_t>(numArgs); i++) {
FuncInOut& arg = self->getArg(i);
if (!arg.hasRegIndex()) {
arg._stackOffset += static_cast<uint16_t>(static_cast<int32_t>(regSize) - stackOffset);
}
}
self->_argStackSize = static_cast<uint32_t>(-stackOffset);
return kErrorOk;
}
Error X86FuncDecl::setPrototype(const FuncPrototype& p) {
uint32_t callConv = p.getCallConv();
uint32_t arch = x86GetArchFromCConv(callConv);
if (arch == kArchNone)
return kErrorInvalidArgument;
if (p.getNumArgs() > kFuncArgCount)
return kErrorInvalidArgument;
// Validate that the required convention is supported by the current asmjit
// configuration, if only one target is compiled.
#if defined(ASMJIT_BUILD_X86) && !defined(ASMJIT_BUILD_X64)
if (arch == kArchX64)
return kErrorInvalidState;
#endif // ASMJIT_BUILD_X86 && !ASMJIT_BUILD_X64
#if !defined(ASMJIT_BUILD_X86) && defined(ASMJIT_BUILD_X64)
if (arch == kArchX86)
return kErrorInvalidState;
#endif // !ASMJIT_BUILD_X86 && ASMJIT_BUILD_X64
ASMJIT_PROPAGATE_ERROR(X86FuncDecl_initConv(this, arch, callConv));
ASMJIT_PROPAGATE_ERROR(X86FuncDecl_initFunc(this, arch, p.getRet(), p.getArgs(), p.getNumArgs()));
return kErrorOk;
}
// ============================================================================
// [asmjit::X86FuncDecl - Reset]
// ============================================================================
void X86FuncDecl::reset() {
uint32_t i;
_callConv = kCallConvNone;
_calleePopsStack = false;
_argsDirection = kFuncDirRTL;
_reserved0 = 0;
_numArgs = 0;
_retCount = 0;
_argStackSize = 0;
_redZoneSize = 0;
_spillZoneSize = 0;
for (i = 0; i < ASMJIT_ARRAY_SIZE(_args); i++)
_args[i].reset();
_rets[0].reset();
_rets[1].reset();
_used.reset();
_passed.reset();
_preserved.reset();
::memset(_passedOrderGp, kInvalidReg, ASMJIT_ARRAY_SIZE(_passedOrderGp));
::memset(_passedOrderXyz, kInvalidReg, ASMJIT_ARRAY_SIZE(_passedOrderXyz));
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER && (ASMJIT_BUILD_X86 || ASMJIT_BUILD_X64)
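The x86 path of X86FuncDecl_initFunc above reduces to three steps: hand out the convention's register slots left-to-right, give every remaining argument a negative running stack offset (4 bytes per integer argument, walked right-to-left for RTL conventions), then rebias all stack offsets so they end up positive. The standalone sketch below models those steps for a hypothetical __thiscall-like convention with a single ECX slot; it illustrates the algorithm and is not the library code itself.
// Minimal model of the x86 argument-assignment loop in X86FuncDecl_initFunc,
// assuming a __thiscall-like convention: one GP register slot (ECX), the rest
// pushed right-to-left in 4-byte slots. Illustrative only.
#include <cstdio>
#include <vector>
struct ArgSlot { int regIndex; int stackOffset; };  // regIndex -1 == not assigned
int main() {
  const int kEcx = 1;                       // register index of ECX (assumed)
  const int numArgs = 3;                    // 'this' plus two ints
  std::vector<ArgSlot> args(numArgs, {-1, 0});
  // Register arguments, left to right: only one slot is available.
  int gpPos = 0;
  const int passedOrderGp[] = { kEcx };
  for (int i = 0; i < numArgs && gpPos < 1; i++)
    args[i].regIndex = passedOrderGp[gpPos++];
  // Stack arguments, right to left, 4 bytes each (negative running offset).
  int stackOffset = 0;
  for (int i = numArgs - 1; i >= 0; i--) {
    if (args[i].regIndex != -1) continue;
    stackOffset -= 4;
    args[i].stackOffset = stackOffset;
  }
  // Rebias so all stack offsets are positive and non-zero (regSize == 4).
  for (int i = 0; i < numArgs; i++)
    if (args[i].regIndex == -1)
      args[i].stackOffset += 4 - stackOffset;
  for (int i = 0; i < numArgs; i++)
    std::printf("arg %d: reg=%d stack=+%d\n", i, args[i].regIndex,
                args[i].regIndex == -1 ? args[i].stackOffset : 0);
  // Expected: arg 0 in ECX, arg 1 at +4, arg 2 at +8.
}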

View File

@ -0,0 +1,133 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86COMPILERFUNC_P_H
#define _ASMJIT_X86_X86COMPILERFUNC_P_H
#include "../build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/compilerfunc.h"
#include "../x86/x86operand.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_x86
//! \{
// ============================================================================
// [asmjit::TypeId]
// ============================================================================
#if !defined(ASMJIT_DOCGEN)
ASMJIT_TYPE_ID(X86MmReg, kX86VarTypeMm);
ASMJIT_TYPE_ID(X86MmVar, kX86VarTypeMm);
ASMJIT_TYPE_ID(X86XmmReg, kX86VarTypeXmm);
ASMJIT_TYPE_ID(X86XmmVar, kX86VarTypeXmm);
ASMJIT_TYPE_ID(X86YmmReg, kX86VarTypeYmm);
ASMJIT_TYPE_ID(X86YmmVar, kX86VarTypeYmm);
ASMJIT_TYPE_ID(X86ZmmReg, kX86VarTypeZmm);
ASMJIT_TYPE_ID(X86ZmmVar, kX86VarTypeZmm);
#endif // !ASMJIT_DOCGEN
// ============================================================================
// [asmjit::X86FuncDecl]
// ============================================================================
//! X86 function declaration, including the calling convention, arguments and
//! their register indices or stack positions.
struct X86FuncDecl : public FuncDecl {
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new `X86FuncDecl` instance.
ASMJIT_INLINE X86FuncDecl() { reset(); }
// --------------------------------------------------------------------------
// [Accessors - X86]
// --------------------------------------------------------------------------
//! Get used registers mask for the given register class `rc`.
//!
//! NOTE: The result depends on the function calling convention AND the
//! function prototype. Returned mask contains only registers actually used
//! to pass function arguments.
ASMJIT_INLINE uint32_t getUsed(uint32_t rc) const { return _used.get(rc); }
//! Get passed registers mask for the given register class `rc`.
//!
//! NOTE: The result depends on the function calling convention used; the
//! prototype of the function doesn't affect the mask returned.
ASMJIT_INLINE uint32_t getPassed(uint32_t rc) const { return _passed.get(rc); }
//! Get preserved registers mask for the given register class `rc`.
//!
//! NOTE: The result depends on the function calling convention used; the
//! prototype of the function doesn't affect the mask returned.
ASMJIT_INLINE uint32_t getPreserved(uint32_t rc) const { return _preserved.get(rc); }
//! Get the order of passed registers (GP).
//!
//! NOTE: The result depends on the function calling convention used; the
//! prototype of the function doesn't affect the order returned.
ASMJIT_INLINE const uint8_t* getPassedOrderGp() const { return _passedOrderGp; }
//! Get the order of passed registers (XMM/YMM/ZMM).
//!
//! NOTE: The result depends on the function calling convention used; the
//! prototype of the function doesn't affect the order returned.
ASMJIT_INLINE const uint8_t* getPassedOrderXyz() const { return _passedOrderXyz; }
// --------------------------------------------------------------------------
// [SetPrototype]
// --------------------------------------------------------------------------
//! Set function prototype.
//!
//! This will set the function calling convention and set up argument variables.
//!
//! NOTE: This function will allocate variables; it can only be called once.
ASMJIT_API Error setPrototype(const FuncPrototype& p);
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
ASMJIT_API void reset();
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Used registers.
X86RegMask _used;
//! Passed registers (defined by the calling convention).
X86RegMask _passed;
//! Preserved registers (defined by the calling convention).
X86RegMask _preserved;
//! Order of registers used to pass GP function arguments.
uint8_t _passedOrderGp[8];
//! Order of registers used to pass XMM/YMM/ZMM function arguments.
uint8_t _passedOrderXyz[8];
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER
#endif // _ASMJIT_X86_X86COMPILERFUNC_P_H

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,85 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../build.h"
#if defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64)
// [Dependencies]
#include "../x86/x86operand.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
namespace x86 {
// ============================================================================
// [asmjit::X86Mem - abs[]]
// ============================================================================
X86Mem ptr_abs(Ptr p, int32_t disp, uint32_t size) noexcept {
X86Mem m(NoInit);
m._init_packed_op_sz_b0_b1_id(Operand::kTypeMem, size, kMemTypeAbsolute, 0, kInvalidValue);
m._vmem.index = kInvalidValue;
m._vmem.displacement = static_cast<int32_t>((intptr_t)(p + disp));
return m;
}
X86Mem ptr_abs(Ptr p, const X86Reg& index, uint32_t shift, int32_t disp, uint32_t size) noexcept {
X86Mem m(NoInit);
uint32_t flags = shift << kX86MemShiftIndex;
if (index.isGp())
flags |= X86Mem::_getGpdFlags(index);
else if (index.isXmm())
flags |= kX86MemVSibXmm << kX86MemVSibIndex;
else if (index.isYmm())
flags |= kX86MemVSibYmm << kX86MemVSibIndex;
m._init_packed_op_sz_b0_b1_id(Operand::kTypeMem, size, kMemTypeAbsolute, flags, kInvalidValue);
m._vmem.index = index.getRegIndex();
m._vmem.displacement = static_cast<int32_t>((intptr_t)(p + disp));
return m;
}
#if !defined(ASMJIT_DISABLE_COMPILER)
X86Mem ptr_abs(Ptr p, const X86Var& index, uint32_t shift, int32_t disp, uint32_t size) noexcept {
X86Mem m(NoInit);
uint32_t flags = shift << kX86MemShiftIndex;
const Var& index_ = reinterpret_cast<const Var&>(index);
uint32_t indexRegType = index_.getRegType();
if (indexRegType <= kX86RegTypeGpq)
flags |= X86Mem::_getGpdFlags(reinterpret_cast<const Var&>(index));
else if (indexRegType == kX86RegTypeXmm)
flags |= kX86MemVSibXmm << kX86MemVSibIndex;
else if (indexRegType == kX86RegTypeYmm)
flags |= kX86MemVSibYmm << kX86MemVSibIndex;
m._init_packed_op_sz_b0_b1_id(Operand::kTypeMem, size, kMemTypeAbsolute, flags, kInvalidValue);
m._vmem.index = index_.getId();
m._vmem.displacement = static_cast<int32_t>((intptr_t)(p + disp));
return m;
}
#endif // !ASMJIT_DISABLE_COMPILER
} // x86 namespace
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // ASMJIT_BUILD_X86 || ASMJIT_BUILD_X64
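
The two overload families above build absolute memory operands. A minimal usage sketch, assuming the usual AsmJit headers and a JIT-backed X86Assembler; the counter variable and function name are illustrative, not from this commit:

#include <cstdint>
#include <asmjit/asmjit.h>

// Sketch: increment a global dword through an absolute memory operand.
static uint32_t g_counter = 0;

void EmitCounterBump(asmjit::X86Assembler &a)
{
	using namespace asmjit;
	// dword ptr [&g_counter], no index register, no extra displacement.
	X86Mem mem = x86::ptr_abs((Ptr)(uintptr_t)&g_counter, 0, 4);
	a.inc(mem);
	a.ret();
}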

File diff suppressed because it is too large

View File

@ -0,0 +1,84 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
#define ASMJIT_EXPORTS_X86_REGS
// [Guard]
#include "../build.h"
#if defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64)
// [Dependencies]
#include "../x86/x86operand.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
#define REG(type, index, size) {{{ \
Operand::kTypeReg, size, { ((type) << 8) + index }, kInvalidValue, {{ kInvalidVar, 0 }} \
}}}
#define REG_LIST_04(type, start, size) \
REG(type, start + 0, size), \
REG(type, start + 1, size), \
REG(type, start + 2, size), \
REG(type, start + 3, size)
#define REG_LIST_08(type, start, size) \
REG_LIST_04(type, start + 0, size), \
REG_LIST_04(type, start + 4, size)
#define REG_LIST_16(type, start, size) \
REG_LIST_08(type, start + 0, size), \
REG_LIST_08(type, start + 8, size)
#define REG_LIST_32(type, start, size) \
REG_LIST_16(type, start + 0, size), \
REG_LIST_16(type, start + 16, size)
const X86RegData x86RegData = {
{ REG_LIST_16(kX86RegTypeGpd , 0, 4) },
{ REG_LIST_16(kX86RegTypeGpq , 0, 8) },
{ REG_LIST_16(kX86RegTypeGpbLo, 0, 1) },
{ REG_LIST_04(kX86RegTypeGpbHi, 0, 1) },
{ REG_LIST_16(kX86RegTypeGpw , 0, 2) },
{ REG_LIST_32(kX86RegTypeXmm , 0, 16) },
{ REG_LIST_32(kX86RegTypeYmm , 0, 32) },
{ REG_LIST_32(kX86RegTypeZmm , 0, 64) },
{ REG_LIST_08(kX86RegTypeK , 0, 8) },
{ REG_LIST_08(kX86RegTypeFp , 0, 10) },
{ REG_LIST_08(kX86RegTypeMm , 0, 8) },
{
REG(kX86RegTypeSeg, 0, 2), // Default.
REG(kX86RegTypeSeg, 1, 2), // ES.
REG(kX86RegTypeSeg, 2, 2), // CS.
REG(kX86RegTypeSeg, 3, 2), // SS.
REG(kX86RegTypeSeg, 4, 2), // DS.
REG(kX86RegTypeSeg, 5, 2), // FS.
REG(kX86RegTypeSeg, 6, 2) // GS.
},
REG(kInvalidReg, kInvalidReg, 0), // NoGp.
REG(kX86RegTypeRip, 0, 0), // RIP.
};
#undef REG_LIST_32
#undef REG_LIST_16
#undef REG_LIST_08
#undef REG_LIST_04
#undef REG
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // ASMJIT_BUILD_X86 || ASMJIT_BUILD_X64
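
Reading straight off the REG macro above: the third field packs the register type into the high byte and the register index into the low byte of the operand's register code. A hypothetical expansion, with the standard x86 index assignment assumed:

// REG(kX86RegTypeGpd, 1, 4) -> a 4-byte operand whose packed register code is
// (kX86RegTypeGpd << 8) + 1, i.e. GPD register index 1 (ECX).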

View File

@ -0,0 +1,70 @@
/**
* =============================================================================
* DynamicHooks
* Copyright (C) 2015 Robin Gohmert. All rights reserved.
* =============================================================================
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
* asm.h/cpp from devmaster.net (thanks cybermind) edited by pRED* to handle gcc
* -fPIC thunks correctly
*
* Idea and trampoline code taken from DynDetours (thanks your-name-here).
*/
// ============================================================================
// >> INCLUDES
// ============================================================================
#ifdef _WIN32
#include <windows.h>
#endif
#ifdef __linux__
#include <sys/mman.h>
#include <unistd.h>
#define PAGE_SIZE 4096
#define ALIGN(ar) ((long)ar & ~(PAGE_SIZE-1))
#define PAGE_EXECUTE_READWRITE PROT_READ|PROT_WRITE|PROT_EXEC
#endif
#include "asm.h"
// ============================================================================
// >> SetMemPatchable
// ============================================================================
void SetMemPatchable(void* pAddr, size_t size)
{
#if defined __linux__
mprotect((void *) ALIGN(pAddr), sysconf(_SC_PAGESIZE), PAGE_EXECUTE_READWRITE);
#elif defined _WIN32
DWORD old_prot;
VirtualProtect(pAddr, size, PAGE_EXECUTE_READWRITE, &old_prot);
#endif
}
// ============================================================================
// >> WriteJMP
// ============================================================================
void WriteJMP(unsigned char* src, void* dest)
{
SetMemPatchable(src, 20);
inject_jmp((void *)src, dest);
}
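
WriteJMP() makes the first 20 bytes of the target writable and then lets inject_jmp() from asm.h (not shown in this excerpt) place the actual jump. For orientation only, a classic 5-byte relative-jump patch of the kind such helpers emit looks like the sketch below; this is an illustration, not the asm.cpp implementation:

#include <cstdint>

// Illustration: patch a JMP rel32 at src that lands on dest. The rel32 is
// relative to the byte following the 5-byte instruction.
static void WriteRelativeJmp(unsigned char *src, void *dest)
{
	src[0] = 0xE9; // JMP rel32 opcode
	*(int32_t *)(src + 1) = (int32_t)((unsigned char *)dest - (src + 5));
}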

40
DynamicHooks/utilities.h Normal file
View File

@ -0,0 +1,40 @@
/**
* =============================================================================
* DynamicHooks
* Copyright (C) 2015 Robin Gohmert. All rights reserved.
* =============================================================================
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
* asm.h/cpp from devmaster.net (thanks cybermind) edited by pRED* to handle gcc
* -fPIC thunks correctly
*
* Idea and trampoline code taken from DynDetours (thanks your-name-here).
*/
#ifndef _UTILITIES_H
#define _UTILITIES_H
// ============================================================================
// >> FUNCTIONS
// ============================================================================
void SetMemPatchable(void* pAddr, size_t size);
void WriteJMP(unsigned char* src, void* dest);
#endif // _UTILITIES_H

560
dynhooks_sourcepawn.cpp Normal file
View File

@ -0,0 +1,560 @@
#include "dynhooks_sourcepawn.h"
#include "util.h"
#include <am-autoptr.h>
#include "conventions/x86MsCdecl.h"
#include "conventions/x86MsThiscall.h"
#include "conventions/x86MsStdcall.h"
#include "conventions/x86GccCdecl.h"
#include "conventions/x86GccThiscall.h"
#ifdef WIN32
typedef x86MsCdecl x86DetourCdecl;
typedef x86MsThiscall x86DetourThisCall;
typedef x86MsStdcall x86DetourStdCall;
#else
typedef x86GccCdecl x86DetourCdecl;
typedef x86GccThiscall x86DetourThisCall;
// No GCC-specific stdcall convention is implemented yet; fall back to the MS one.
typedef x86MsStdcall x86DetourStdCall;
#endif
//ke::Vector<CHook *> g_pDetours;
//CallbackMap g_pPluginPreDetours;
//CallbackMap g_pPluginPostDetours;
DetourMap g_pPreDetours;
DetourMap g_pPostDetours;
void UnhookFunction(HookType_t hookType, CHook *pDetour)
{
CHookManager *pDetourManager = GetHookManager();
pDetour->RemoveCallback(hookType, (HookHandlerFn *)(void *)&HandleDetour);
if (!pDetour->AreCallbacksRegistered())
pDetourManager->UnhookFunction(pDetour->m_pFunc);
}
bool AddDetourPluginHook(HookType_t hookType, CHook *pDetour, HookSetup *setup, IPluginFunction *pCallback)
{
DetourMap *map;
if (hookType == HOOKTYPE_PRE)
map = &g_pPreDetours;
else
map = &g_pPostDetours;
// See if we already have this detour in our list.
PluginCallbackList *wrappers;
DetourMap::Insert f = map->findForAdd(pDetour);
if (f.found())
{
wrappers = f->value;
}
else
{
// Create a vector to store all the plugin callbacks in.
wrappers = new PluginCallbackList;
if (!map->add(f, pDetour, wrappers))
{
delete wrappers;
UnhookFunction(hookType, pDetour);
return false;
}
}
CDynamicHooksSourcePawn *pWrapper = new CDynamicHooksSourcePawn(setup, pDetour, pCallback, hookType == HOOKTYPE_POST);
if (!wrappers->append(pWrapper))
{
if (wrappers->empty())
{
delete wrappers;
UnhookFunction(hookType, pDetour);
map->remove(f);
}
delete pWrapper;
return false;
}
return true;
}
bool RemoveDetourPluginHook(HookType_t hookType, CHook *pDetour, IPluginFunction *pCallback)
{
DetourMap *map;
if (hookType == HOOKTYPE_PRE)
map = &g_pPreDetours;
else
map = &g_pPostDetours;
DetourMap::Result res = map->find(pDetour);
if (!res.found())
return false;
// Remove the plugin's callback
bool bRemoved = false;
PluginCallbackList *wrappers = res->value;
for (int i = wrappers->length()-1; i >= 0 ; i--)
{
CDynamicHooksSourcePawn *pWrapper = wrappers->at(i);
if (pWrapper->plugin_callback == pCallback)
{
bRemoved = true;
delete pWrapper;
wrappers->remove(i);
}
}
// No more plugin hooks on this callback. Free our structures.
if (wrappers->empty())
{
delete wrappers;
UnhookFunction(hookType, pDetour);
map->remove(res);
}
return bRemoved;
}
void RemoveAllCallbacksForContext(HookType_t hookType, DetourMap *map, IPluginContext *pContext)
{
PluginCallbackList *wrappers;
CDynamicHooksSourcePawn *pWrapper;
DetourMap::iterator it = map->iter();
// Run through all active detours we added.
for (; !it.empty(); it.next())
{
wrappers = it->value;
// See if there are callbacks of this plugin context registered
// and remove them.
for (int i = wrappers->length() - 1; i >= 0; i--)
{
pWrapper = wrappers->at(i);
if (pWrapper->plugin_callback->GetParentContext() != pContext)
continue;
delete pWrapper;
wrappers->remove(i);
}
// No plugin is interested in this hook anymore. Unhook it.
if (wrappers->empty())
{
delete wrappers;
UnhookFunction(hookType, it->key);
it.erase();
}
}
}
void RemoveAllCallbacksForContext(IPluginContext *pContext)
{
RemoveAllCallbacksForContext(HOOKTYPE_PRE, &g_pPreDetours, pContext);
RemoveAllCallbacksForContext(HOOKTYPE_POST, &g_pPostDetours, pContext);
}
ICallingConvention *ConstructCallingConvention(HookSetup *setup)
{
std::vector<DataTypeSized_t> vecArgTypes;
for (size_t i = 0; i < setup->params.size(); i++)
{
ParamInfo &info = setup->params[i];
DataTypeSized_t type;
type.type = DynamicHooks_ConvertParamTypeFrom(info.type);
type.size = info.size;
vecArgTypes.push_back(type);
}
DataTypeSized_t returnType;
returnType.type = DynamicHooks_ConvertReturnTypeFrom(setup->returnType);
returnType.size = 0;
ICallingConvention *pCallConv = nullptr;
switch (setup->callConv)
{
case CallConv_CDECL:
pCallConv = new x86DetourCdecl(vecArgTypes, returnType);
break;
case CallConv_THISCALL:
pCallConv = new x86DetourThisCall(vecArgTypes, returnType);
break;
case CallConv_STDCALL:
pCallConv = new x86DetourStdCall(vecArgTypes, returnType);
break;
}
return pCallConv;
}
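// Illustration only (types from this commit, concrete values assumed): a
// thiscall detour for something like "bool CBaseX::TakeDamage(int)" would be
// described roughly as
//
//   HookSetup setup(ReturnType_Bool, PASSFLAG_BYVAL, CallConv_THISCALL,
//                   ThisPointer_CBaseEntity, pTargetAddr);
//   ParamInfo p; p.type = HookParamType_Int; p.size = sizeof(int);
//   setup.params.push_back(p);
//   ICallingConvention *pConv = ConstructCallingConvention(&setup);
//
// which yields an x86MsThiscall on Windows and an x86GccThiscall on Linux.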
bool HandleDetour(HookType_t hookType, CHook* pDetour)
{
DetourMap *map;
if (hookType == HOOKTYPE_PRE)
map = &g_pPreDetours;
else
map = &g_pPostDetours;
// Find the callback list for this detour.
DetourMap::Result r = map->find(pDetour);
if (!r.found())
return false;
// List of all callbacks.
PluginCallbackList *wrappers = r->value;
HookReturnStruct *returnStruct = NULL;
Handle_t rHndl = BAD_HANDLE;
HookParamsStruct *paramStruct = NULL;
Handle_t pHndl = BAD_HANDLE;
int argNum = pDetour->m_pCallingConvention->m_vecArgTypes.size();
MRESReturn finalRet = MRES_Ignored;
ke::AutoPtr<void> finalRetBuf(new uint8_t[pDetour->m_pCallingConvention->m_returnType.size]);
// Call all the plugin functions.
for (size_t i = 0; i < wrappers->length(); i++)
{
CDynamicHooksSourcePawn *pWrapper = wrappers->at(i);
IPluginFunction *pCallback = pWrapper->plugin_callback;
MRESReturn tempRet = MRES_Ignored;
ke::AutoPtr<void> tempRetBuf(new uint8_t[pDetour->m_pCallingConvention->m_returnType.size]);
// Find the this pointer.
if (pWrapper->callConv == CallConv_THISCALL)
{
void *thisPtr = pDetour->GetArgument<void *>(0);
cell_t thisAddr = GetThisPtr(thisPtr, pWrapper->thisType);
pCallback->PushCell(thisAddr);
}
if (pWrapper->returnType != ReturnType_Void)
{
returnStruct = pWrapper->GetReturnStruct();
HandleError err;
rHndl = handlesys->CreateHandle(g_HookReturnHandle, returnStruct, pCallback->GetParentRuntime()->GetDefaultContext()->GetIdentity(), myself->GetIdentity(), &err);
if (!rHndl)
{
pCallback->Cancel();
pCallback->GetParentRuntime()->GetDefaultContext()->BlamePluginError(pCallback, "Error creating ReturnHandle in preparation to call hook callback. (error %d)", err);
if (returnStruct)
delete returnStruct;
// Don't call more callbacks. They will probably fail too.
break;
}
pCallback->PushCell(rHndl);
}
if (argNum > 0)
{
paramStruct = pWrapper->GetParamStruct();
HandleError err;
pHndl = handlesys->CreateHandle(g_HookParamsHandle, paramStruct, pCallback->GetParentRuntime()->GetDefaultContext()->GetIdentity(), myself->GetIdentity(), &err);
if (!pHndl)
{
pCallback->Cancel();
pCallback->GetParentRuntime()->GetDefaultContext()->BlamePluginError(pCallback, "Error creating ThisHandle in preparation to call hook callback. (error %d)", err);
// Don't leak our own handles here!
if (rHndl)
{
HandleSecurity sec(pCallback->GetParentRuntime()->GetDefaultContext()->GetIdentity(), myself->GetIdentity());
handlesys->FreeHandle(rHndl, &sec);
rHndl = BAD_HANDLE;
}
if (paramStruct)
delete paramStruct;
// Don't call more callbacks. They will probably fail too.
break;
}
pCallback->PushCell(pHndl);
}
cell_t result = (cell_t)MRES_Ignored;
pCallback->Execute(&result);
switch ((MRESReturn)result)
{
case MRES_Handled:
tempRet = MRES_Handled;
break;
case MRES_ChangedHandled:
tempRet = MRES_Handled;
pWrapper->UpdateParamsFromStruct(paramStruct);
break;
case MRES_ChangedOverride:
if (pWrapper->returnType != ReturnType_Void)
{
if (returnStruct->isChanged)
{
if (pWrapper->returnType == ReturnType_String || pWrapper->returnType == ReturnType_Int || pWrapper->returnType == ReturnType_Bool)
{
tempRetBuf = *(void **)returnStruct->newResult;
}
else if (pWrapper->returnType == ReturnType_Float)
{
*(float *)tempRetBuf.get() = *(float *)returnStruct->newResult;
}
else
{
tempRetBuf = returnStruct->newResult;
}
}
else //Throw an error if no override was set
{
tempRet = MRES_Ignored;
pCallback->GetParentRuntime()->GetDefaultContext()->BlamePluginError(pCallback, "Tried to override return value without return value being set");
break;
}
}
// TODO: Introduce that override concept in dyndetours.
// This doesn't call the original function at the moment, but just returns the given return value.
tempRet = MRES_Override;
pWrapper->UpdateParamsFromStruct(paramStruct);
break;
case MRES_Override:
if (pWrapper->returnType != ReturnType_Void)
{
if (returnStruct->isChanged)
{
// TODO: Introduce that override concept in dyndetours.
// This doesn't call the original function at the moment, but just returns the given return value.
tempRet = MRES_Override;
if (pWrapper->returnType == ReturnType_String || pWrapper->returnType == ReturnType_Int || pWrapper->returnType == ReturnType_Bool)
{
tempRetBuf = *(void **)returnStruct->newResult;
}
else if (pWrapper->returnType == ReturnType_Float)
{
*(float *)tempRetBuf.get() = *(float *)returnStruct->newResult;
}
else
{
tempRetBuf = returnStruct->newResult;
}
}
else //Throw an error if no override was set
{
tempRet = MRES_Ignored;
pCallback->GetParentRuntime()->GetDefaultContext()->BlamePluginError(pCallback, "Tried to override return value without return value being set");
}
}
break;
case MRES_Supercede:
if (pWrapper->returnType != ReturnType_Void)
{
if (returnStruct->isChanged)
{
tempRet = MRES_Supercede;
if (pWrapper->returnType == ReturnType_String || pWrapper->returnType == ReturnType_Int || pWrapper->returnType == ReturnType_Bool)
{
tempRetBuf = *(void **)returnStruct->newResult;
}
else if (pWrapper->returnType == ReturnType_Float)
{
*(float *)tempRetBuf.get() = *(float *)returnStruct->newResult;
}
else
{
tempRetBuf = returnStruct->newResult;
}
}
else //Throw an error if no override was set
{
tempRet = MRES_Ignored;
pCallback->GetParentRuntime()->GetDefaultContext()->BlamePluginError(pCallback, "Tried to override return value without return value being set");
}
}
else
{
tempRet = MRES_Supercede;
}
break;
default:
tempRet = MRES_Ignored;
break;
}
// Prioritize the actions.
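// The MRESReturn actions used here are ordered from least to most intrusive
// (MRES_Ignored < MRES_Handled < MRES_Override < MRES_Supercede), so keeping
// the maximum keeps the strongest action any plugin requested.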
if (finalRet <= tempRet) {
// ------------------------------------
// Copy the action and return value.
// ------------------------------------
finalRet = tempRet;
memcpy(*finalRetBuf, *tempRetBuf, pDetour->m_pCallingConvention->m_returnType.size);
}
// Free the handles again.
HandleSecurity sec(pCallback->GetParentRuntime()->GetDefaultContext()->GetIdentity(), myself->GetIdentity());
if (returnStruct)
{
handlesys->FreeHandle(rHndl, &sec);
}
if (paramStruct)
{
handlesys->FreeHandle(pHndl, &sec);
}
}
if (finalRet >= MRES_Override)
{
void* pPtr = pDetour->m_pCallingConvention->GetReturnPtr(pDetour->m_pRegisters);
memcpy(pPtr, *finalRetBuf, pDetour->m_pCallingConvention->m_returnType.size);
pDetour->m_pCallingConvention->ReturnPtrChanged(pDetour->m_pRegisters, pPtr);
}
return finalRet == MRES_Supercede;
}
CDynamicHooksSourcePawn::CDynamicHooksSourcePawn(HookSetup *setup, CHook *pDetour, IPluginFunction *pCallback, bool post)
{
this->params = setup->params;
this->offset = -1;
this->returnFlag = setup->returnFlag;
this->returnType = setup->returnType;
this->post = post;
this->plugin_callback = pCallback;
this->entity = -1;
this->thisType = setup->thisType;
this->hookType = setup->hookType;
this->m_pDetour = pDetour;
this->callConv = setup->callConv;
}
HookReturnStruct *CDynamicHooksSourcePawn::GetReturnStruct()
{
HookReturnStruct *res = new HookReturnStruct();
res->isChanged = false;
res->type = this->returnType;
res->orgResult = NULL;
res->newResult = NULL;
if (this->post)
{
switch (this->returnType)
{
case ReturnType_String:
res->orgResult = malloc(sizeof(string_t));
res->newResult = malloc(sizeof(string_t));
*(string_t *)res->orgResult = m_pDetour->GetReturnValue<string_t>();
break;
case ReturnType_Int:
res->orgResult = malloc(sizeof(int));
res->newResult = malloc(sizeof(int));
*(int *)res->orgResult = m_pDetour->GetReturnValue<int>();
break;
case ReturnType_Bool:
res->orgResult = malloc(sizeof(bool));
res->newResult = malloc(sizeof(bool));
*(bool *)res->orgResult = m_pDetour->GetReturnValue<bool>();
break;
case ReturnType_Float:
res->orgResult = malloc(sizeof(float));
res->newResult = malloc(sizeof(float));
*(float *)res->orgResult = m_pDetour->GetReturnValue<float>();
break;
case ReturnType_Vector:
{
res->orgResult = malloc(sizeof(SDKVector));
res->newResult = malloc(sizeof(SDKVector));
SDKVector vec = m_pDetour->GetReturnValue<SDKVector>();
*(SDKVector *)res->orgResult = vec;
break;
}
default:
res->orgResult = m_pDetour->GetReturnValue<void *>();
break;
}
}
else
{
switch (this->returnType)
{
case ReturnType_String:
res->orgResult = malloc(sizeof(string_t));
res->newResult = malloc(sizeof(string_t));
*(string_t *)res->orgResult = NULL_STRING;
break;
case ReturnType_Vector:
res->orgResult = malloc(sizeof(SDKVector));
res->newResult = malloc(sizeof(SDKVector));
*(SDKVector *)res->orgResult = SDKVector();
break;
case ReturnType_Int:
res->orgResult = malloc(sizeof(int));
res->newResult = malloc(sizeof(int));
*(int *)res->orgResult = 0;
break;
case ReturnType_Bool:
res->orgResult = malloc(sizeof(bool));
res->newResult = malloc(sizeof(bool));
*(bool *)res->orgResult = false;
break;
case ReturnType_Float:
res->orgResult = malloc(sizeof(float));
res->newResult = malloc(sizeof(float));
*(float *)res->orgResult = 0.0;
break;
}
}
return res;
}
HookParamsStruct *CDynamicHooksSourcePawn::GetParamStruct()
{
HookParamsStruct *params = new HookParamsStruct();
params->dg = this;
size_t paramsSize = this->m_pDetour->m_pCallingConvention->GetArgStackSize();
std::vector<DataTypeSized_t> &argTypes = m_pDetour->m_pCallingConvention->m_vecArgTypes;
int numArgs = argTypes.size();
params->orgParams = (void **)malloc(paramsSize);
params->newParams = (void **)malloc(paramsSize);
params->isChanged = (bool *)malloc(numArgs * sizeof(bool));
size_t offset = 0;
for (int i = 0; i < numArgs; i++)
{
void *pArgPtr = m_pDetour->m_pCallingConvention->GetStackArgumentPtr(m_pDetour->m_pRegisters);
memcpy(params->orgParams, pArgPtr, paramsSize);
*(void **)((intptr_t)params->newParams + offset) = NULL;
params->isChanged[i] = false;
offset += argTypes[i].size;
}
return params;
}
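// In the structure built above, orgParams holds a raw copy of the argument
// stack, each newParams slot is initialised to NULL, and isChanged[] marks
// the arguments a plugin later replaces; UpdateParamsFromStruct() below
// writes only the changed slots back onto the real stack.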
void CDynamicHooksSourcePawn::UpdateParamsFromStruct(HookParamsStruct *params)
{
// The function has no params to update.
if (!params)
return;
std::vector<DataTypeSized_t> &argTypes = m_pDetour->m_pCallingConvention->m_vecArgTypes;
int numArgs = argTypes.size();
int firstArg = 0;
if (callConv == CallConv_THISCALL)
firstArg = 1;
size_t offset = 0;
for (int i = 0; i < numArgs; i++)
{
int size = argTypes[i].size;
if (params->isChanged[i])
{
void *paramAddr = (void *)((intptr_t)params->newParams + offset);
void *stackAddr = m_pDetour->m_pCallingConvention->GetArgumentPtr(i + firstArg, m_pDetour->m_pRegisters);
memcpy(stackAddr, paramAddr, size);
}
offset += size;
}
}

41
dynhooks_sourcepawn.h Normal file
View File

@ -0,0 +1,41 @@
#ifndef _INCLUDE_DYNHOOKS_SP_H_
#define _INCLUDE_DYNHOOKS_SP_H_
#include "manager.h"
#include "vhook.h"
#include <am-vector.h>
#include <am-hashmap.h>
class CDynamicHooksSourcePawn;
typedef ke::HashMap<IPluginFunction *, CDynamicHooksSourcePawn *, ke::PointerPolicy<IPluginFunction>> CallbackMap;
typedef ke::Vector<CDynamicHooksSourcePawn *> PluginCallbackList;
typedef ke::HashMap<CHook *, PluginCallbackList *, ke::PointerPolicy<CHook>> DetourMap;
//extern ke::Vector<CHook *> g_pDetours;
// Keep a list of plugin callback -> Hook wrapper for easily removing plugin hooks
//extern CallbackMap g_pPluginPreDetours;
//extern CallbackMap g_pPluginPostDetours;
// Keep a list of hook -> callbacks for calling in the detour handler
extern DetourMap g_pPreDetours;
extern DetourMap g_pPostDetours;
class CDynamicHooksSourcePawn : public DHooksInfo {
public:
CDynamicHooksSourcePawn(HookSetup *setup, CHook *pDetour, IPluginFunction *pCallback, bool post);
HookReturnStruct *GetReturnStruct();
HookParamsStruct *GetParamStruct();
void UpdateParamsFromStruct(HookParamsStruct *params);
public:
CHook *m_pDetour;
CallingConvention callConv;
};
ICallingConvention *ConstructCallingConvention(HookSetup *setup);
bool HandleDetour(HookType_t hookType, CHook* pDetour);
bool AddDetourPluginHook(HookType_t hookType, CHook *pDetour, HookSetup *setup, IPluginFunction *pCallback);
bool RemoveDetourPluginHook(HookType_t hookType, CHook *pDetour, IPluginFunction *pCallback);
void RemoveAllCallbacksForContext(IPluginContext *pContext);
#endif

View File

@ -1,5 +1,6 @@
#include "extension.h"
#include "listeners.h"
#include "dynhooks_sourcepawn.h"
DHooks g_DHooksIface; /**< Global singleton for extension's main interface */
SMEXT_LINK(&g_DHooksIface);
@ -35,6 +36,18 @@ bool DHooks::SDK_OnLoad(char *error, size_t maxlength, bool late)
return false;
}
if (!g_pPreDetours.init())
{
snprintf(error, maxlength, "Could not initialize pre hook detours hashmap.");
return false;
}
if (!g_pPostDetours.init())
{
snprintf(error, maxlength, "Could not initialize post hook detours hashmap.");
return false;
}
sharesys->AddDependency(myself, "bintools.ext", true, true);
sharesys->AddDependency(myself, "sdktools.ext", true, true);
sharesys->AddDependency(myself, "sdkhooks.ext", true, true);
@ -76,6 +89,8 @@ void DHooks::SDK_OnAllLoaded()
void DHooks::SDK_OnUnload()
{
CleanupHooks();
// FIXME: Unhook only functions that are hooked by a plugin. + cleanup
GetHookManager()->UnhookAllFunctions();
if(g_pEntityListener)
{
g_pEntityListener->CleanupListeners();
@ -102,6 +117,7 @@ bool DHooks::SDK_OnMetamodLoad(ISmmAPI *ismm, char *error, size_t maxlength, boo
void DHooks::OnPluginUnloaded(IPlugin *plugin)
{
CleanupHooks(plugin->GetBaseContext());
RemoveAllCallbacksForContext(plugin->GetBaseContext());
if(g_pEntityListener)
{
g_pEntityListener->CleanupListeners(plugin->GetBaseContext());

View File

@ -3,6 +3,7 @@
#include "extension.h"
#include "vhook.h"
#include <am-vector.h>
enum ListenType
{

View File

@ -1,5 +1,14 @@
#include "natives.h"
#include "util.h"
#include "dynhooks_sourcepawn.h"
// Must match same enum in sdktools.inc
enum SDKFuncConfSource
{
SDKConf_Virtual,
SDKConf_Signature,
SDKConf_Address
};
bool GetHandleIfValidOrError(HandleType_t type, void **object, IPluginContext *pContext, cell_t param)
{
@ -38,6 +47,77 @@ cell_t Native_CreateHook(IPluginContext *pContext, const cell_t *params)
return hndl;
}
//native Handle:DHookCreateDetour(Address:funcaddr, CallingConvention callConv, ReturnType:returntype, ThisPointerType:thistype);
cell_t Native_CreateDetour(IPluginContext *pContext, const cell_t *params)
{
HookSetup *setup = new HookSetup((ReturnType)params[3], PASSFLAG_BYVAL, (CallingConvention)params[2], (ThisPointerType)params[4], (void *)params[1]);
Handle_t hndl = handlesys->CreateHandle(g_HookSetupHandle, setup, pContext->GetIdentity(), myself->GetIdentity(), NULL);
if (!hndl)
{
delete setup;
return pContext->ThrowNativeError("Failed to create hook");
}
return hndl;
}
//native bool:DHookSetFromConf(Handle:setup, Handle:gameconf, SDKFuncConfSource:source, const String:name[]);
cell_t Native_SetFromConf(IPluginContext *pContext, const cell_t *params)
{
HookSetup *setup;
if (!GetHandleIfValidOrError(g_HookSetupHandle, (void **)&setup, pContext, params[1]))
{
return 0;
}
IGameConfig *conf;
HandleError err;
if ((conf = gameconfs->ReadHandle(params[2], pContext->GetIdentity(), &err)) == nullptr)
{
return pContext->ThrowNativeError("Invalid Handle %x (error %d)", params[2], err);
}
char *key;
pContext->LocalToString(params[4], &key);
int offset = -1;
void *addr = nullptr;
switch (params[3])
{
case SDKConf_Virtual:
if (!conf->GetOffset(key, &offset))
{
return 0;
}
break;
case SDKConf_Signature:
if (!conf->GetMemSig(key, &addr) || !addr)
{
return 0;
}
break;
case SDKConf_Address:
if (!conf->GetAddress(key, &addr) || !addr)
{
return 0;
}
break;
default:
return pContext->ThrowNativeError("Unknown SDKFuncConfSource: %d", params[3]);
}
// Save the new info. Setting one option always invalidates the other
// (detour vs. vhook).
setup->funcAddr = addr;
setup->offset = offset;
return 1;
}
//native bool:DHookAddParam(Handle:setup, HookParamType:type); OLD
//native bool:DHookAddParam(Handle:setup, HookParamType:type, size=-1, DHookPassFlag:flag=DHookPass_ByVal);
cell_t Native_AddParam(IPluginContext *pContext, const cell_t *params)
@ -80,6 +160,87 @@ cell_t Native_AddParam(IPluginContext *pContext, const cell_t *params)
return 1;
}
// native bool:DHookEnableDetour(Handle:setup, bool:post, DHookCallback:callback);
cell_t Native_EnableDetour(IPluginContext *pContext, const cell_t *params)
{
HookSetup *setup;
if (!GetHandleIfValidOrError(g_HookSetupHandle, (void **)&setup, pContext, params[1]))
{
return 0;
}
if (setup->funcAddr == nullptr)
{
return pContext->ThrowNativeError("Hook not setup for a detour.");
}
IPluginFunction *callback = pContext->GetFunctionById(params[3]);
if (!callback)
{
return pContext->ThrowNativeError("Failed to retrieve function by id");
}
bool post = params[2] != 0;
HookType_t hookType = post ? HOOKTYPE_POST : HOOKTYPE_PRE;
// Check if we already detoured that function.
CHookManager *pDetourManager = GetHookManager();
CHook* pDetour = pDetourManager->FindHook(setup->funcAddr);
// If there is no detour on this function yet, create it.
if (!pDetour)
{
ICallingConvention *pCallConv = ConstructCallingConvention(setup);
pDetour = pDetourManager->HookFunction(setup->funcAddr, pCallConv);
}
// Register our pre/post handler.
pDetour->AddCallback(hookType, (HookHandlerFn *)&HandleDetour);
// Add the plugin callback to the map.
return AddDetourPluginHook(hookType, pDetour, setup, callback);
}
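// The native above reuses an existing CHook when the target was already
// detoured; otherwise it builds the calling-convention description from the
// setup and asks DynamicHooks to create the detour. In both cases the shared
// HandleDetour handler is registered for the requested hook type before the
// plugin callback is recorded in the pre/post map.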
// native bool:DHookDisableDetour(Handle:setup, bool:post, DHookCallback:callback);
cell_t Native_DisableDetour(IPluginContext *pContext, const cell_t *params)
{
HookSetup *setup;
if (!GetHandleIfValidOrError(g_HookSetupHandle, (void **)&setup, pContext, params[1]))
{
return 0;
}
if (setup->funcAddr == nullptr)
{
return pContext->ThrowNativeError("Hook not setup for a detour.");
}
IPluginFunction *callback = pContext->GetFunctionById(params[3]);
if (!callback)
{
return pContext->ThrowNativeError("Failed to retrieve function by id");
}
bool post = params[2] != 0;
HookType_t hookType = post ? HOOKTYPE_POST : HOOKTYPE_PRE;
// Check if we already detoured that function.
CHookManager *pDetourManager = GetHookManager();
CHook* pDetour = pDetourManager->FindHook(setup->funcAddr);
if (!pDetour || !pDetour->IsCallbackRegistered(hookType, (HookHandlerFn *)&HandleDetour))
{
return pContext->ThrowNativeError("Function not detoured.");
}
// Remove the callback from the hook.
return RemoveDetourPluginHook(hookType, pDetour, callback);
}
// native DHookEntity(Handle:setup, bool:post, entity, DHookRemovalCB:removalcb);
cell_t Native_HookEntity(IPluginContext *pContext, const cell_t *params)
{
@ -90,6 +251,11 @@ cell_t Native_HookEntity(IPluginContext *pContext, const cell_t *params)
return 0;
}
if (setup->offset == -1)
{
return pContext->ThrowNativeError("Hook not setup for a virtual hook.");
}
if(setup->hookType != HookType_Entity)
{
return pContext->ThrowNativeError("Hook is not an entity hook");
@ -133,6 +299,11 @@ cell_t Native_HookGamerules(IPluginContext *pContext, const cell_t *params)
return 0;
}
if (setup->offset == -1)
{
return pContext->ThrowNativeError("Hook not setup for a virtual hook.");
}
if(setup->hookType != HookType_GameRules)
{
return pContext->ThrowNativeError("Hook is not a gamerules hook");
@ -178,6 +349,11 @@ cell_t Native_HookRaw(IPluginContext *pContext, const cell_t *params)
return 0;
}
if (setup->offset == -1)
{
return pContext->ThrowNativeError("Hook not setup for a virtual hook.");
}
if(setup->hookType != HookType_Raw)
{
return pContext->ThrowNativeError("Hook is not a raw hook");
@ -1036,7 +1212,11 @@ cell_t Native_IsNullParam(IPluginContext *pContext, const cell_t *params)
sp_nativeinfo_t g_Natives[] =
{
{"DHookCreate", Native_CreateHook},
{"DHookCreateDetour", Native_CreateDetour},
{"DHookSetFromConf", Native_SetFromConf },
{"DHookAddParam", Native_AddParam},
{"DHookEnableDetour", Native_EnableDetour},
//{"DHookDisableDetour", Native_DisableDetour},
{"DHookEntity", Native_HookEntity},
{"DHookGamerules", Native_HookGamerules},
{"DHookRaw", Native_HookRaw},

View File

@ -63,7 +63,7 @@
#define SMEXT_ENABLE_HANDLESYS
#define SMEXT_ENABLE_PLAYERHELPERS
//#define SMEXT_ENABLE_DBMANAGER
//#define SMEXT_ENABLE_GAMECONF
#define SMEXT_ENABLE_GAMECONF
//#define SMEXT_ENABLE_MEMUTILS
#define SMEXT_ENABLE_GAMEHELPERS
//#define SMEXT_ENABLE_TIMERSYS

View File

@ -72,6 +72,13 @@ enum HookType
HookType_Raw
};
enum CallingConvention
{
CallConv_CDECL,
CallConv_THISCALL,
CallConv_STDCALL,
};
enum MRESReturn
{
MRES_ChangedHandled = -2, // Use changed values and return MRES_Handled
@ -176,6 +183,10 @@ native bool DHookRemoveEntityListener(ListenType type, ListenCB callback);
*/
native Handle DHookCreate(int offset, HookType hooktype, ReturnType returntype, ThisPointerType thistype, DHookCallback callback);
native Handle DHookCreateDetour(Address funcaddr, CallingConvention callConv, ReturnType returntype, ThisPointerType thistype);
native bool DHookSetFromConf(Handle setup, Handle gameconf, SDKFuncConfSource source, const char[] name);
native bool DHookEnableDetour(Handle setup, bool post, DHookCallback callback);
/* Adds param to a hook setup
*
* @param setup Setup handle to add the param to.

View File

@ -46,3 +46,56 @@ size_t GetParamsSize(DHooksCallback *dg)//Get the full size, this is for creatin
return res;
}
DataType_t DynamicHooks_ConvertParamTypeFrom(HookParamType type)
{
switch (type)
{
case HookParamType_Int:
return DATA_TYPE_INT;
case HookParamType_Bool:
return DATA_TYPE_BOOL;
case HookParamType_Float:
return DATA_TYPE_FLOAT;
case HookParamType_StringPtr:
case HookParamType_CharPtr:
case HookParamType_VectorPtr:
case HookParamType_CBaseEntity:
case HookParamType_ObjectPtr:
case HookParamType_Edict:
return DATA_TYPE_POINTER;
case HookParamType_Object:
return DATA_TYPE_OBJECT;
default:
smutils->LogError(myself, "Unhandled parameter type %d!", type);
}
return DATA_TYPE_POINTER;
}
DataType_t DynamicHooks_ConvertReturnTypeFrom(ReturnType type)
{
switch (type)
{
case ReturnType_Void:
return DATA_TYPE_VOID;
case ReturnType_Int:
return DATA_TYPE_INT;
case ReturnType_Bool:
return DATA_TYPE_BOOL;
case ReturnType_Float:
return DATA_TYPE_FLOAT;
case ReturnType_StringPtr:
case ReturnType_CharPtr:
case ReturnType_VectorPtr:
case ReturnType_CBaseEntity:
case ReturnType_Edict:
return DATA_TYPE_POINTER;
case ReturnType_Vector:
return DATA_TYPE_OBJECT;
default:
smutils->LogError(myself, "Unhandled return type %d!", type);
}
return DATA_TYPE_VOID;
}

4
util.h
View File

@ -2,9 +2,13 @@
#define _INCLUDE_UTIL_FUNCTIONS_H_
#include "vhook.h"
#include "convention.h"
size_t GetParamOffset(HookParamsStruct *params, unsigned int index);
void * GetObjectAddr(HookParamType type, unsigned int flags, void **params, size_t offset);
size_t GetParamTypeSize(HookParamType type);
size_t GetParamsSize(DHooksCallback *dg);
DataType_t DynamicHooks_ConvertParamTypeFrom(HookParamType type);
DataType_t DynamicHooks_ConvertReturnTypeFrom(ReturnType type);
#endif

114
vhook.cpp
View File

@ -1,6 +1,7 @@
#include "vhook.h"
#include "vfunc_call.h"
#include "util.h"
#include <macro-assembler-x86.h>
SourceHook::IHookManagerAutoGen *g_pHookManager = NULL;
@ -14,6 +15,95 @@ using namespace SourceHook;
#define OBJECT_OFFSET (sizeof(void *)*2)
#endif
#ifndef WIN32
void *GenerateThunk(ReturnType type)
{
sp::MacroAssembler masm;
static const size_t kStackNeeded = (2) * 4; // 2 args max
static const size_t kReserve = ke::Align(kStackNeeded + 8, 16) - 8;
masm.push(ebp);
masm.movl(ebp, esp);
masm.subl(esp, kReserve);
if (type != ReturnType_String && type != ReturnType_Vector)
{
masm.lea(eax, Operand(ebp, 12)); // grab the incoming caller argument vector
masm.movl(Operand(esp, 1 * 4), eax); // set that as the 2nd argument
masm.movl(eax, Operand(ebp, 8)); // grab the |this|
masm.movl(Operand(esp, 0 * 4), eax); // set |this| as the 1st argument
}
else
{
masm.lea(eax, Operand(ebp, 8)); // grab the incoming caller argument vector
masm.movl(Operand(esp, 1 * 4), eax); // set that as the 2nd argument
masm.movl(eax, Operand(ebp, 12)); // grab the |this|
masm.movl(Operand(esp, 0 * 4), eax); // set |this| as the 1st argument
}
if (type == ReturnType_Float)
{
masm.call(ExternalAddress((void *)Callback_float));
}
else if (type == ReturnType_Vector)
{
masm.call(ExternalAddress((void *)Callback_vector));
}
else if (type == ReturnType_String)
{
masm.call(ExternalAddress((void *)Callback_stringt));
}
else
{
masm.call(ExternalAddress((void *)Callback));
}
masm.addl(esp, kReserve);
masm.pop(ebp); // restore ebp
masm.ret();
void *base = g_pSM->GetScriptingEngine()->AllocatePageMemory(masm.length());
masm.emitToExecutableMemory(base);
return base;
}
#else
// HUGE THANKS TO BAILOPAN (dvander)!
void *GenerateThunk(ReturnType type)
{
sp::MacroAssembler masm;
static const size_t kStackNeeded = (3 + 1) * 4; // 3 args max, 1 locals max
static const size_t kReserve = ke::Align(kStackNeeded + 8, 16) - 8;
masm.push(ebp);
masm.movl(ebp, esp);
masm.subl(esp, kReserve);
masm.lea(eax, Operand(esp, 3 * 4)); // ptr to 2nd var after argument space
masm.movl(Operand(esp, 2 * 4), eax); // set the ptr as the third argument
masm.lea(eax, Operand(ebp, 8)); // grab the incoming caller argument vector
masm.movl(Operand(esp, 1 * 4), eax); // set that as the 2nd argument
masm.movl(Operand(esp, 0 * 4), ecx); // set |this| as the 1st argument
if (type == ReturnType_Float)
{
masm.call(ExternalAddress(Callback_float));
}
else if (type == ReturnType_Vector)
{
masm.call(ExternalAddress(Callback_vector));
}
else
{
masm.call(ExternalAddress(Callback));
}
masm.movl(ecx, Operand(esp, 3 * 4));
masm.addl(esp, kReserve);
masm.pop(ebp); // restore ebp
masm.pop(edx); // grab return address in edx
masm.addl(esp, ecx); // remove arguments
masm.jmp(edx); // return to caller
void *base = g_pSM->GetScriptingEngine()->AllocatePageMemory(masm.length());
masm.emitToExecutableMemory(base);
return base;
}
#endif
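// As the inline comments above indicate, both thunks forward |this| and a
// pointer to the caller's argument vector to the C++ Callback* functions. The
// Windows variant additionally passes the address of a stack local; the
// callback stores the argument size there so the thunk can pop those bytes
// itself (addl esp, ecx) and emulate the callee-cleanup of the hooked
// __thiscall before jumping back to the caller.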
DHooksManager::DHooksManager(HookSetup *setup, void *iface, IPluginFunction *remove_callback, bool post)
{
this->callback = MakeHandler(setup->returnType);
@ -121,6 +211,27 @@ size_t GetStackArgsSize(DHooksCallback *dg)
return res;
}
HookReturnStruct::~HookReturnStruct()
{
if (this->type == ReturnType_String || this->type == ReturnType_Int || this->type == ReturnType_Bool || this->type == ReturnType_Float || this->type == ReturnType_Vector)
{
free(this->newResult);
free(this->orgResult);
}
else if (this->isChanged)
{
if (this->type == ReturnType_CharPtr)
{
delete[](char *)this->newResult;
}
else if (this->type == ReturnType_VectorPtr)
{
delete (SDKVector *)this->newResult;
}
}
}
HookParamsStruct::~HookParamsStruct()
{
if (this->orgParams != NULL)
@ -329,7 +440,8 @@ void *Callback(DHooksCallback *dg, void **argStack)
dg->plugin_callback->Cancel();
if(returnStruct)
{
delete returnStruct;
HandleSecurity sec(dg->plugin_callback->GetParentRuntime()->GetDefaultContext()->GetIdentity(), myself->GetIdentity());
handlesys->FreeHandle(rHndl, &sec);
}
if(paramStruct)
{

143
vhook.h
View File

@ -3,7 +3,13 @@
#include "extension.h"
#include <sourcehook.h>
#include <macro-assembler-x86.h>
enum CallingConvention
{
CallConv_CDECL,
CallConv_THISCALL,
CallConv_STDCALL,
};
enum MRESReturn
{
@ -86,29 +92,16 @@ struct ParamInfo
SourceHook::PassInfo::PassType pass_type;
};
#ifdef WIN32
#define OBJECT_OFFSET sizeof(void *)
#else
#define OBJECT_OFFSET (sizeof(void *)*2)
#endif
class HookReturnStruct
{
public:
~HookReturnStruct()
{
if(this->type == ReturnType_String || this->type == ReturnType_Int || this->type == ReturnType_Bool || this->type == ReturnType_Float || this->type == ReturnType_Vector)
{
free(this->newResult);
free(this->orgResult);
}
else if(this->isChanged)
{
if(this->type == ReturnType_CharPtr)
{
delete [] (char *)this->newResult;
}
else if(this->type == ReturnType_VectorPtr)
{
delete (SDKVector *)this->newResult;
}
}
}
~HookReturnStruct();
public:
ReturnType type;
bool isChanged;
@ -162,95 +155,7 @@ bool SetupHookManager(ISmmAPI *ismm);
void CleanupHooks(IPluginContext *pContext = NULL);
size_t GetParamTypeSize(HookParamType type);
SourceHook::PassInfo::PassType GetParamTypePassType(HookParamType type);
#ifndef WIN32
static void *GenerateThunk(ReturnType type)
{
sp::MacroAssemblerX86 masm;
static const size_t kStackNeeded = (2) * 4; // 2 args max
static const size_t kReserve = ke::Align(kStackNeeded+8, 16)-8;
masm.push(ebp);
masm.movl(ebp, esp);
masm.subl(esp, kReserve);
if(type != ReturnType_String && type != ReturnType_Vector)
{
masm.lea(eax, Operand(ebp, 12)); // grab the incoming caller argument vector
masm.movl(Operand(esp, 1 * 4), eax); // set that as the 2nd argument
masm.movl(eax, Operand(ebp, 8)); // grab the |this|
masm.movl(Operand(esp, 0 * 4), eax); // set |this| as the 1st argument*/
}
else
{
masm.lea(eax, Operand(ebp, 8)); // grab the incoming caller argument vector
masm.movl(Operand(esp, 1 * 4), eax); // set that as the 2nd argument
masm.movl(eax, Operand(ebp, 12)); // grab the |this|
masm.movl(Operand(esp, 0 * 4), eax); // set |this| as the 1st argument*/
}
if(type == ReturnType_Float)
{
masm.call(ExternalAddress((void *)Callback_float));
}
else if(type == ReturnType_Vector)
{
masm.call(ExternalAddress((void *)Callback_vector));
}
else if(type == ReturnType_String)
{
masm.call(ExternalAddress((void *)Callback_stringt));
}
else
{
masm.call(ExternalAddress((void *)Callback));
}
masm.addl(esp, kReserve);
masm.pop(ebp); // restore ebp
masm.ret();
void *base = g_pSM->GetScriptingEngine()->AllocatePageMemory(masm.length());
masm.emitToExecutableMemory(base);
return base;
}
#else
// HUGE THANKS TO BAILOPAN (dvander)!
static void *GenerateThunk(ReturnType type)
{
sp::MacroAssemblerX86 masm;
static const size_t kStackNeeded = (3 + 1) * 4; // 3 args max, 1 locals max
static const size_t kReserve = ke::Align(kStackNeeded+8, 16)-8;
masm.push(ebp);
masm.movl(ebp, esp);
masm.subl(esp, kReserve);
masm.lea(eax, Operand(esp, 3 * 4)); // ptr to 2nd var after argument space
masm.movl(Operand(esp, 2 * 4), eax); // set the ptr as the third argument
masm.lea(eax, Operand(ebp, 8)); // grab the incoming caller argument vector
masm.movl(Operand(esp, 1 * 4), eax); // set that as the 2nd argument
masm.movl(Operand(esp, 0 * 4), ecx); // set |this| as the 1st argument
if(type == ReturnType_Float)
{
masm.call(ExternalAddress(Callback_float));
}
else if(type == ReturnType_Vector)
{
masm.call(ExternalAddress(Callback_vector));
}
else
{
masm.call(ExternalAddress(Callback));
}
masm.movl(ecx, Operand(esp, 3*4));
masm.addl(esp, kReserve);
masm.pop(ebp); // restore ebp
masm.pop(edx); // grab return address in edx
masm.addl(esp, ecx); // remove arguments
masm.jmp(edx); // return to caller
void *base = g_pSM->GetScriptingEngine()->AllocatePageMemory(masm.length());
masm.emitToExecutableMemory(base);
return base;
}
#endif
void *GenerateThunk(ReturnType type);
static DHooksCallback *MakeHandler(ReturnType type)
{
@ -280,7 +185,7 @@ public:
void **orgParams;
void **newParams;
bool *isChanged;
DHooksCallback *dg;
DHooksInfo *dg;
};
class HookSetup
@ -291,18 +196,33 @@ public:
this->returnType = returnType;
this->returnFlag = returnFlag;
this->hookType = hookType;
this->callConv = CallConv_THISCALL;
this->thisType = thisType;
this->offset = offset;
this->funcAddr = nullptr;
this->callback = callback;
};
HookSetup(ReturnType returnType, unsigned int returnFlag, CallingConvention callConv, ThisPointerType thisType, void *funcAddr)
{
this->returnType = returnType;
this->returnFlag = returnFlag;
this->hookType = HookType_Raw;
this->callConv = callConv;
this->thisType = thisType;
this->offset = -1;
this->funcAddr = funcAddr;
this->callback = nullptr;
};
~HookSetup(){};
public:
unsigned int returnFlag;
ReturnType returnType;
HookType hookType;
CallingConvention callConv;
ThisPointerType thisType;
SourceHook::CVector<ParamInfo> params;
int offset;
void *funcAddr;
IPluginFunction *callback;
};
@ -335,6 +255,7 @@ public:
};
size_t GetStackArgsSize(DHooksCallback *dg);
cell_t GetThisPtr(void *iface, ThisPointerType type);
extern IBinTools *g_pBinTools;
extern HandleType_t g_HookParamsHandle;