// DO NOT EDIT. This file is auto-generated by `amalgamate.py`.

#define NOMINMAX

#include "safetyhook.hpp"

//
// Source file: inline_hook.cpp
//

#include <iterator>

#if __has_include("Zydis/Zydis.h")
#include "Zydis/Zydis.h"
#elif __has_include("Zydis.h")
#include "Zydis.h"
#else
#error "Zydis not found"
#endif

//
// Header: safetyhook/os.hpp
//

// This is the OS abstraction layer.
//#pragma once

#ifndef SAFETYHOOK_USE_CXXMODULES
#include <cstdint>
#include <expected.hpp>
#include <functional>
#else
import std.compat;
#endif

namespace safetyhook {

enum class OsError {
    FAILED_TO_ALLOCATE,
    FAILED_TO_PROTECT,
    FAILED_TO_QUERY,
    FAILED_TO_GET_NEXT_THREAD,
    FAILED_TO_GET_THREAD_CONTEXT,
    FAILED_TO_SET_THREAD_CONTEXT,
    FAILED_TO_FREEZE_THREAD,
    FAILED_TO_UNFREEZE_THREAD,
    FAILED_TO_GET_THREAD_ID,
};

struct VmAccess {
    bool read : 1;
    bool write : 1;
    bool execute : 1;

    constexpr VmAccess() : read(true), write(true), execute(true) {}
    constexpr VmAccess(bool pread, bool pwrite, bool pexecute) : read(pread), write(pwrite), execute(pexecute) {}

    constexpr bool operator==(const VmAccess& other) const {
        return read == other.read && write == other.write && execute == other.execute;
    }
};

constexpr VmAccess VM_ACCESS_R(true, false, false);
constexpr VmAccess VM_ACCESS_RW(true, true, false);
constexpr VmAccess VM_ACCESS_RX(true, false, true);
constexpr VmAccess VM_ACCESS_RWX(true, true, true);

struct VmBasicInfo {
    uint8_t* address;
    size_t size;
    VmAccess access;
    bool is_free;

    constexpr VmBasicInfo() : address(nullptr), size(0), access(VM_ACCESS_RWX), is_free(false) {}
};

tl::expected<uint8_t*, OsError> vm_allocate(uint8_t* address, size_t size, VmAccess access);
void vm_free(uint8_t* address);
tl::expected<uint32_t, OsError> vm_protect(uint8_t* address, size_t size, VmAccess access);
tl::expected<uint32_t, OsError> vm_protect(uint8_t* address, size_t size, uint32_t access);
tl::expected<VmBasicInfo, OsError> vm_query(uint8_t* address);
bool vm_is_readable(uint8_t* address, size_t size);
bool vm_is_writable(uint8_t* address, size_t size);
bool vm_is_executable(uint8_t* address);

struct SystemInfo {
    uint32_t page_size;
    uint32_t allocation_granularity;
    uint8_t* min_address;
    uint8_t* max_address;
};

SystemInfo system_info();

using ThreadContext = void*;

void trap_threads(uint8_t* from, uint8_t* to, size_t len, const std::function<void()>& run_fn);

/// @brief Modifies a thread's instruction pointer in the given context so that it points to a new address if it currently points at the old address.
/// @param ctx The thread context to modify.
/// @param old_ip The old IP address.
/// @param new_ip The new IP address.
void fix_ip(ThreadContext ctx, uint8_t* old_ip, uint8_t* new_ip);

} // namespace safetyhook
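
// Illustrative sketch (not part of the library): how the OS layer declared above is typically used.
// It allocates an RW page, fills it, flips it to RX, and frees it; the tl::expected results are
// checked before use. Names below are for illustration only.
//
//     using namespace safetyhook;
//
//     if (auto page = vm_allocate(nullptr, system_info().page_size, VM_ACCESS_RW)) {
//         std::fill_n(*page, 16, uint8_t{0x90}); // write some code bytes (NOPs)
//         if (vm_protect(*page, 16, VM_ACCESS_RX)) {
//             // the page is now executable
//         }
//         vm_free(*page);
//     }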

namespace safetyhook {

#pragma pack(push, 1)
struct JmpE9 {
    uint8_t opcode{0xE9};
    uint32_t offset{0};
};

#if SAFETYHOOK_ARCH_X86_64
struct JmpFF {
    uint8_t opcode0{0xFF};
    uint8_t opcode1{0x25};
    uint32_t offset{0};
};

struct TrampolineEpilogueE9 {
    JmpE9 jmp_to_original{};
    JmpFF jmp_to_destination{};
    uint64_t destination_address{};
};

struct TrampolineEpilogueFF {
    JmpFF jmp_to_original{};
    uint64_t original_address{};
};
#elif SAFETYHOOK_ARCH_X86_32
struct TrampolineEpilogueE9 {
    JmpE9 jmp_to_original{};
    JmpE9 jmp_to_destination{};
};
#endif
#pragma pack(pop)

#if SAFETYHOOK_ARCH_X86_64
static auto make_jmp_ff(uint8_t* src, uint8_t* dst, uint8_t* data) {
    JmpFF jmp{};

    jmp.offset = static_cast<uint32_t>(data - src - sizeof(jmp));
    store(data, dst);

    return jmp;
}

[[nodiscard]] static tl::expected<void, InlineHook::Error> emit_jmp_ff(
    uint8_t* src, uint8_t* dst, uint8_t* data, size_t size = sizeof(JmpFF)) {
    if (size < sizeof(JmpFF)) {
        return tl::unexpected{InlineHook::Error::not_enough_space(dst)};
    }

    if (size > sizeof(JmpFF)) {
        std::fill_n(src, size, static_cast<uint8_t>(0x90));
    }

    store(src, make_jmp_ff(src, dst, data));

    return {};
}
#endif

constexpr auto make_jmp_e9(uint8_t* src, uint8_t* dst) {
    JmpE9 jmp{};

    jmp.offset = static_cast<uint32_t>(dst - src - sizeof(jmp));

    return jmp;
}

[[nodiscard]] static tl::expected<void, InlineHook::Error> emit_jmp_e9(
    uint8_t* src, uint8_t* dst, size_t size = sizeof(JmpE9)) {
    if (size < sizeof(JmpE9)) {
        return tl::unexpected{InlineHook::Error::not_enough_space(dst)};
    }

    if (size > sizeof(JmpE9)) {
        std::fill_n(src, size, static_cast<uint8_t>(0x90));
    }

    store(src, make_jmp_e9(src, dst));

    return {};
}
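
// Worked example (comment only): make_jmp_e9 computes a rel32 displacement relative to the end of the
// 5-byte E9 instruction. With src = 0x1000 and dst = 0x2000, offset = 0x2000 - 0x1000 - 5 = 0xFFB, so
// the encoded bytes are E9 FB 0F 00 00. When emit_jmp_e9 is given more room than sizeof(JmpE9), the
// extra bytes are filled with 0x90 (NOP) first so no partial instruction is left behind at the site.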

static bool decode(ZydisDecodedInstruction* ix, uint8_t* ip) {
    ZydisDecoder decoder{};
    ZyanStatus status;

#if SAFETYHOOK_ARCH_X86_64
    status = ZydisDecoderInit(&decoder, ZYDIS_MACHINE_MODE_LONG_64, ZYDIS_STACK_WIDTH_64);
#elif SAFETYHOOK_ARCH_X86_32
    status = ZydisDecoderInit(&decoder, ZYDIS_MACHINE_MODE_LEGACY_32, ZYDIS_STACK_WIDTH_32);
#endif

    if (!ZYAN_SUCCESS(status)) {
        return false;
    }

    return ZYAN_SUCCESS(ZydisDecoderDecodeInstruction(&decoder, nullptr, ip, 15, ix));
}

tl::expected<InlineHook, InlineHook::Error> InlineHook::create(void* target, void* destination, Flags flags) {
    return create(Allocator::global(), target, destination, flags);
}

tl::expected<InlineHook, InlineHook::Error> InlineHook::create(
    const std::shared_ptr<Allocator>& allocator, void* target, void* destination, Flags flags) {
    InlineHook hook{};

    if (const auto setup_result =
            hook.setup(allocator, reinterpret_cast<uint8_t*>(target), reinterpret_cast<uint8_t*>(destination));
        !setup_result) {
        return tl::unexpected{setup_result.error()};
    }

    if (!(flags & StartDisabled)) {
        if (auto enable_result = hook.enable(); !enable_result) {
            return tl::unexpected{enable_result.error()};
        }
    }

    return hook;
}
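
// Usage sketch (comment only, assumes the public safetyhook.hpp API and its default flags argument):
// hook a free function and call the original through the trampoline. `add` and `hooked_add` are
// hypothetical names used for illustration.
//
//     int add(int x, int y) { return x + y; }
//
//     safetyhook::InlineHook g_add_hook;
//
//     int hooked_add(int x, int y) {
//         // Call the unhooked original through the trampoline allocation.
//         auto original = reinterpret_cast<int (*)(int, int)>(g_add_hook.trampoline().data());
//         return original(x, y) + 1;
//     }
//
//     if (auto hook = safetyhook::InlineHook::create(
//             reinterpret_cast<void*>(&add), reinterpret_cast<void*>(&hooked_add))) {
//         g_add_hook = std::move(*hook);
//     }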

InlineHook::InlineHook(InlineHook&& other) noexcept {
    *this = std::move(other);
}

InlineHook& InlineHook::operator=(InlineHook&& other) noexcept {
    if (this != &other) {
        destroy();

        std::scoped_lock lock{m_mutex, other.m_mutex};

        m_target = other.m_target;
        m_destination = other.m_destination;
        m_trampoline = std::move(other.m_trampoline);
        m_trampoline_size = other.m_trampoline_size;
        m_original_bytes = std::move(other.m_original_bytes);
        m_enabled = other.m_enabled;
        m_type = other.m_type;

        other.m_target = nullptr;
        other.m_destination = nullptr;
        other.m_trampoline_size = 0;
        other.m_enabled = false;
        other.m_type = Type::Unset;
    }

    return *this;
}

InlineHook::~InlineHook() {
    destroy();
}

void InlineHook::reset() {
    *this = {};
}

tl::expected<void, InlineHook::Error> InlineHook::setup(
    const std::shared_ptr<Allocator>& allocator, uint8_t* target, uint8_t* destination) {
    m_target = target;
    m_destination = destination;

    if (auto e9_result = e9_hook(allocator); !e9_result) {
#if SAFETYHOOK_ARCH_X86_64
        if (auto ff_result = ff_hook(allocator); !ff_result) {
            return ff_result;
        }
#elif SAFETYHOOK_ARCH_X86_32
        return e9_result;
#endif
    }

    return {};
}

tl::expected<void, InlineHook::Error> InlineHook::e9_hook(const std::shared_ptr<Allocator>& allocator) {
    m_original_bytes.clear();
    m_trampoline_size = sizeof(TrampolineEpilogueE9);

    std::vector<uint8_t*> desired_addresses{m_target};
    ZydisDecodedInstruction ix{};

    for (auto ip = m_target; ip < m_target + sizeof(JmpE9); ip += ix.length) {
        if (!decode(&ix, ip)) {
            return tl::unexpected{Error::failed_to_decode_instruction(ip)};
        }

        m_trampoline_size += ix.length;
        m_original_bytes.insert(m_original_bytes.end(), ip, ip + ix.length);

        const auto is_relative = (ix.attributes & ZYDIS_ATTRIB_IS_RELATIVE) != 0;

        if (is_relative) {
            if (ix.raw.disp.size == 32) {
                const auto target_address = ip + ix.length + static_cast<int32_t>(ix.raw.disp.value);
                desired_addresses.emplace_back(target_address);
            } else if (ix.raw.imm[0].size == 32) {
                const auto target_address = ip + ix.length + static_cast<int32_t>(ix.raw.imm[0].value.s);
                desired_addresses.emplace_back(target_address);
            } else if (ix.meta.category == ZYDIS_CATEGORY_COND_BR && ix.meta.branch_type == ZYDIS_BRANCH_TYPE_SHORT) {
                const auto target_address = ip + ix.length + static_cast<int32_t>(ix.raw.imm[0].value.s);
                desired_addresses.emplace_back(target_address);
                m_trampoline_size += 4; // near conditional branches are 4 bytes larger.
            } else if (ix.meta.category == ZYDIS_CATEGORY_UNCOND_BR && ix.meta.branch_type == ZYDIS_BRANCH_TYPE_SHORT) {
                const auto target_address = ip + ix.length + static_cast<int32_t>(ix.raw.imm[0].value.s);
                desired_addresses.emplace_back(target_address);
                m_trampoline_size += 3; // near unconditional branches are 3 bytes larger.
            } else {
                return tl::unexpected{Error::unsupported_instruction_in_trampoline(ip)};
            }
        }
    }

    auto trampoline_allocation = allocator->allocate_near(desired_addresses, m_trampoline_size);

    if (!trampoline_allocation) {
        return tl::unexpected{Error::bad_allocation(trampoline_allocation.error())};
    }

    m_trampoline = std::move(*trampoline_allocation);

    for (auto ip = m_target, tramp_ip = m_trampoline.data(); ip < m_target + m_original_bytes.size(); ip += ix.length) {
        if (!decode(&ix, ip)) {
            m_trampoline.free();
            return tl::unexpected{Error::failed_to_decode_instruction(ip)};
        }

        const auto is_relative = (ix.attributes & ZYDIS_ATTRIB_IS_RELATIVE) != 0;

        if (is_relative && ix.raw.disp.size == 32) {
            std::copy_n(ip, ix.length, tramp_ip);
            const auto target_address = ip + ix.length + ix.raw.disp.value;
            const auto new_disp = target_address - (tramp_ip + ix.length);
            store(tramp_ip + ix.raw.disp.offset, static_cast<int32_t>(new_disp));
            tramp_ip += ix.length;
        } else if (is_relative && ix.raw.imm[0].size == 32) {
            std::copy_n(ip, ix.length, tramp_ip);
            const auto target_address = ip + ix.length + ix.raw.imm[0].value.s;
            const auto new_disp = target_address - (tramp_ip + ix.length);
            store(tramp_ip + ix.raw.imm[0].offset, static_cast<int32_t>(new_disp));
            tramp_ip += ix.length;
        } else if (ix.meta.category == ZYDIS_CATEGORY_COND_BR && ix.meta.branch_type == ZYDIS_BRANCH_TYPE_SHORT) {
            const auto target_address = ip + ix.length + ix.raw.imm[0].value.s;
            auto new_disp = target_address - (tramp_ip + 6);

            // Handle the case where the target is now in the trampoline.
            if (target_address < m_target + m_original_bytes.size()) {
                new_disp = static_cast<ptrdiff_t>(ix.raw.imm[0].value.s);
            }

            *tramp_ip = 0x0F;
            *(tramp_ip + 1) = 0x10 + ix.opcode;
            store(tramp_ip + 2, static_cast<int32_t>(new_disp));
            tramp_ip += 6;
        } else if (ix.meta.category == ZYDIS_CATEGORY_UNCOND_BR && ix.meta.branch_type == ZYDIS_BRANCH_TYPE_SHORT) {
            const auto target_address = ip + ix.length + ix.raw.imm[0].value.s;
            auto new_disp = target_address - (tramp_ip + 5);

            // Handle the case where the target is now in the trampoline.
            if (target_address < m_target + m_original_bytes.size()) {
                new_disp = static_cast<ptrdiff_t>(ix.raw.imm[0].value.s);
            }

            *tramp_ip = 0xE9;
            store(tramp_ip + 1, static_cast<int32_t>(new_disp));
            tramp_ip += 5;
        } else {
            std::copy_n(ip, ix.length, tramp_ip);
            tramp_ip += ix.length;
        }
    }

    auto trampoline_epilogue = reinterpret_cast<TrampolineEpilogueE9*>(
        m_trampoline.address() + m_trampoline_size - sizeof(TrampolineEpilogueE9));

    // jmp from trampoline to original.
    auto src = reinterpret_cast<uint8_t*>(&trampoline_epilogue->jmp_to_original);
    auto dst = m_target + m_original_bytes.size();

    if (auto result = emit_jmp_e9(src, dst); !result) {
        return tl::unexpected{result.error()};
    }

    // jmp from trampoline to destination.
    src = reinterpret_cast<uint8_t*>(&trampoline_epilogue->jmp_to_destination);
    dst = m_destination;

#if SAFETYHOOK_ARCH_X86_64
    auto data = reinterpret_cast<uint8_t*>(&trampoline_epilogue->destination_address);

    if (auto result = emit_jmp_ff(src, dst, data); !result) {
        return tl::unexpected{result.error()};
    }
#elif SAFETYHOOK_ARCH_X86_32
    if (auto result = emit_jmp_e9(src, dst); !result) {
        return tl::unexpected{result.error()};
    }
#endif

    m_type = Type::E9;

    return {};
}
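
// Sizing example (comment only): for a target that begins with `xor eax, eax` (2 bytes), a short `jz`
// (2 bytes) and `mov eax, 1` (5 bytes), the first loop keeps decoding until at least sizeof(JmpE9) = 5
// bytes are covered, so all three instructions (9 bytes) are relocated and m_trampoline_size becomes
// sizeof(TrampolineEpilogueE9) + 9 + 4. The +4 is because the 2-byte short jz is re-encoded in the
// trampoline as a 6-byte near jcc (0F 8x rel32), which is exactly what the 0x0F / 0x10 + opcode path
// in the second loop emits.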

#if SAFETYHOOK_ARCH_X86_64
tl::expected<void, InlineHook::Error> InlineHook::ff_hook(const std::shared_ptr<Allocator>& allocator) {
    m_original_bytes.clear();
    m_trampoline_size = sizeof(TrampolineEpilogueFF);
    ZydisDecodedInstruction ix{};

    for (auto ip = m_target; ip < m_target + sizeof(JmpFF) + sizeof(uintptr_t); ip += ix.length) {
        if (!decode(&ix, ip)) {
            return tl::unexpected{Error::failed_to_decode_instruction(ip)};
        }

        // We can't support any instruction that is IP-relative here because
        // ff_hook should only be called if e9_hook failed, indicating that
        // we're likely outside the +/- 2GB range.
        if (ix.attributes & ZYDIS_ATTRIB_IS_RELATIVE) {
            return tl::unexpected{Error::ip_relative_instruction_out_of_range(ip)};
        }

        m_original_bytes.insert(m_original_bytes.end(), ip, ip + ix.length);
        m_trampoline_size += ix.length;
    }

    auto trampoline_allocation = allocator->allocate(m_trampoline_size);

    if (!trampoline_allocation) {
        return tl::unexpected{Error::bad_allocation(trampoline_allocation.error())};
    }

    m_trampoline = std::move(*trampoline_allocation);

    std::copy(m_original_bytes.begin(), m_original_bytes.end(), m_trampoline.data());

    const auto trampoline_epilogue =
        reinterpret_cast<TrampolineEpilogueFF*>(m_trampoline.data() + m_trampoline_size - sizeof(TrampolineEpilogueFF));

    // jmp from trampoline to original.
    auto src = reinterpret_cast<uint8_t*>(&trampoline_epilogue->jmp_to_original);
    auto dst = m_target + m_original_bytes.size();
    auto data = reinterpret_cast<uint8_t*>(&trampoline_epilogue->original_address);

    if (auto result = emit_jmp_ff(src, dst, data); !result) {
        return tl::unexpected{result.error()};
    }

    m_type = Type::FF;

    return {};
}
#endif
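
// Layout note with a small example (comment only): JmpFF encodes `jmp [rip+disp32]` (FF 25 disp32), so
// the jump reads its 64-bit destination from memory instead of using a rel32. In TrampolineEpilogueFF
// the disp32 points at the adjacent original_address field; at the hook site, enable() calls
// emit_jmp_ff with data = m_target + sizeof(JmpFF), placing the 8-byte destination right after the
// 6-byte instruction. That is why ff_hook must steal at least sizeof(JmpFF) + sizeof(uintptr_t) = 14
// bytes of original instructions.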

tl::expected<void, InlineHook::Error> InlineHook::enable() {
    std::scoped_lock lock{m_mutex};

    if (m_enabled) {
        return {};
    }

    std::optional<Error> error;

    // jmp from original to trampoline.
    trap_threads(m_target, m_trampoline.data(), m_original_bytes.size(), [this, &error] {
        if (m_type == Type::E9) {
            auto trampoline_epilogue = reinterpret_cast<TrampolineEpilogueE9*>(
                m_trampoline.address() + m_trampoline_size - sizeof(TrampolineEpilogueE9));

            if (auto result = emit_jmp_e9(m_target,
                    reinterpret_cast<uint8_t*>(&trampoline_epilogue->jmp_to_destination), m_original_bytes.size());
                !result) {
                error = result.error();
            }
        }

#if SAFETYHOOK_ARCH_X86_64
        if (m_type == Type::FF) {
            if (auto result = emit_jmp_ff(m_target, m_destination, m_target + sizeof(JmpFF), m_original_bytes.size());
                !result) {
                error = result.error();
            }
        }
#endif
    });

    if (error) {
        return tl::unexpected{*error};
    }

    m_enabled = true;

    return {};
}

tl::expected<void, InlineHook::Error> InlineHook::disable() {
    std::scoped_lock lock{m_mutex};

    if (!m_enabled) {
        return {};
    }

    trap_threads(m_trampoline.data(), m_target, m_original_bytes.size(),
        [this] { std::copy(m_original_bytes.begin(), m_original_bytes.end(), m_target); });

    m_enabled = false;

    return {};
}

void InlineHook::destroy() {
    [[maybe_unused]] auto disable_result = disable();

    std::scoped_lock lock{m_mutex};

    if (!m_trampoline) {
        return;
    }

    m_trampoline.free();
}
} // namespace safetyhook

//
// Source file: vmt_hook.cpp
//

namespace safetyhook {
VmHook::VmHook(VmHook&& other) noexcept {
    *this = std::move(other);
}

VmHook& VmHook::operator=(VmHook&& other) noexcept {
    destroy();
    m_original_vm = other.m_original_vm;
    m_new_vm = other.m_new_vm;
    m_vmt_entry = other.m_vmt_entry;
    m_new_vmt_allocation = std::move(other.m_new_vmt_allocation);
    other.m_original_vm = nullptr;
    other.m_new_vm = nullptr;
    other.m_vmt_entry = nullptr;
    return *this;
}

VmHook::~VmHook() {
    destroy();
}

void VmHook::reset() {
    *this = {};
}

void VmHook::destroy() {
    if (m_original_vm != nullptr) {
        *m_vmt_entry = m_original_vm;
        m_original_vm = nullptr;
        m_new_vm = nullptr;
        m_vmt_entry = nullptr;
        m_new_vmt_allocation.reset();
    }
}

tl::expected<VmtHook, VmtHook::Error> VmtHook::create(void* object) {
    VmtHook hook{};

    const auto original_vmt = *reinterpret_cast<uint8_t***>(object);
    hook.m_objects.emplace(object, original_vmt);

    // Count the number of virtual method pointers. We start at one to account for the RTTI pointer.
    auto num_vmt_entries = 1;

    for (auto vm = original_vmt; is_executable(*vm); ++vm) {
        ++num_vmt_entries;
    }

    // Allocate memory for the new VMT.
    auto allocation = Allocator::global()->allocate(num_vmt_entries * sizeof(uint8_t*));

    if (!allocation) {
        return tl::unexpected{Error::bad_allocation(allocation.error())};
    }

    hook.m_new_vmt_allocation = std::make_shared<Allocation>(std::move(*allocation));
    hook.m_new_vmt = reinterpret_cast<uint8_t**>(hook.m_new_vmt_allocation->data());

    // Copy pointer to RTTI.
    hook.m_new_vmt[0] = original_vmt[-1];

    // Copy virtual method pointers.
    for (auto i = 0; i < num_vmt_entries - 1; ++i) {
        hook.m_new_vmt[i + 1] = original_vmt[i];
    }

    *reinterpret_cast<uint8_t***>(object) = &hook.m_new_vmt[1];

    return hook;
}
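
// Usage sketch (comment only): VmtHook::create copies the object's VMT into a fresh allocation
// (slot 0 holds the RTTI pointer, so virtual method i lives at m_new_vmt[i + 1]) and repoints the
// object's vtable pointer at &m_new_vmt[1]. Individual entries are then swapped through the public
// header's API (hook_method(index, fn) in upstream safetyhook; assumed here, it is not defined in
// this file):
//
//     auto vmt = safetyhook::VmtHook::create(some_object);
//     // if (vmt) { auto vm = vmt->hook_method(2, &my_replacement); ... }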

VmtHook::VmtHook(VmtHook&& other) noexcept {
    *this = std::move(other);
}

VmtHook& VmtHook::operator=(VmtHook&& other) noexcept {
    destroy();
    m_objects = std::move(other.m_objects);
    m_new_vmt_allocation = std::move(other.m_new_vmt_allocation);
    m_new_vmt = other.m_new_vmt;
    other.m_new_vmt = nullptr;
    return *this;
}

VmtHook::~VmtHook() {
    destroy();
}

void VmtHook::apply(void* object) {
    m_objects.emplace(object, *reinterpret_cast<uint8_t***>(object));
    *reinterpret_cast<uint8_t***>(object) = &m_new_vmt[1];
}

void VmtHook::remove(void* object) {
    const auto search = m_objects.find(object);

    if (search == m_objects.end()) {
        return;
    }

    const auto original_vmt = search->second;

    if (!vm_is_writable(reinterpret_cast<uint8_t*>(object), sizeof(void*))) {
        m_objects.erase(search);
        return;
    }

    if (*reinterpret_cast<uint8_t***>(object) != &m_new_vmt[1]) {
        m_objects.erase(search);
        return;
    }

    *reinterpret_cast<uint8_t***>(object) = original_vmt;

    m_objects.erase(search);
}

void VmtHook::reset() {
    *this = {};
}

void VmtHook::destroy() {
    for (const auto [object, original_vmt] : m_objects) {
        if (!vm_is_writable(reinterpret_cast<uint8_t*>(object), sizeof(void*))) {
            continue;
        }

        if (*reinterpret_cast<uint8_t***>(object) != &m_new_vmt[1]) {
            continue;
        }

        *reinterpret_cast<uint8_t***>(object) = original_vmt;
    }

    m_objects.clear();
    m_new_vmt_allocation.reset();
    m_new_vmt = nullptr;
}
} // namespace safetyhook

//
// Source file: utility.cpp
//

namespace safetyhook {
bool is_executable(uint8_t* address) {
    return vm_is_executable(address);
}

UnprotectMemory::~UnprotectMemory() {
    if (m_address != nullptr) {
        vm_protect(m_address, m_size, m_original_protection);
    }
}

UnprotectMemory::UnprotectMemory(UnprotectMemory&& other) noexcept {
    *this = std::move(other);
}

UnprotectMemory& UnprotectMemory::operator=(UnprotectMemory&& other) noexcept {
    if (this != &other) {
        m_address = other.m_address;
        m_size = other.m_size;
        m_original_protection = other.m_original_protection;
        other.m_address = nullptr;
        other.m_size = 0;
        other.m_original_protection = 0;
    }

    return *this;
}

std::optional<UnprotectMemory> unprotect(uint8_t* address, size_t size) {
    auto old_protection = vm_protect(address, size, VM_ACCESS_RWX);

    if (!old_protection) {
        return std::nullopt;
    }

    return UnprotectMemory{address, size, old_protection.value()};
}
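
// Usage sketch (comment only): unprotect() is an RAII helper. The returned UnprotectMemory restores
// the original protection in its destructor, so a patch stays scoped:
//
//     if (auto guard = safetyhook::unprotect(code_ptr, 5)) {
//         // code_ptr is RWX inside this block; write the patch bytes here.
//     } // original protection restored when `guard` goes out of scope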

} // namespace safetyhook

//
// Source file: mid_hook.cpp
//

#include <algorithm>
#include <array>

namespace safetyhook {

#if SAFETYHOOK_ARCH_X86_64
#if SAFETYHOOK_OS_WINDOWS
constexpr std::array<uint8_t, 391> asm_data = {0xFF, 0x35, 0x79, 0x01, 0x00, 0x00, 0x54, 0x54, 0x55, 0x50, 0x53, 0x51,
0x52, 0x56, 0x57, 0x41, 0x50, 0x41, 0x51, 0x41, 0x52, 0x41, 0x53, 0x41, 0x54, 0x41, 0x55, 0x41, 0x56, 0x41, 0x57,
0x9C, 0x48, 0x81, 0xEC, 0x00, 0x01, 0x00, 0x00, 0xF3, 0x44, 0x0F, 0x7F, 0xBC, 0x24, 0xF0, 0x00, 0x00, 0x00, 0xF3,
0x44, 0x0F, 0x7F, 0xB4, 0x24, 0xE0, 0x00, 0x00, 0x00, 0xF3, 0x44, 0x0F, 0x7F, 0xAC, 0x24, 0xD0, 0x00, 0x00, 0x00,
0xF3, 0x44, 0x0F, 0x7F, 0xA4, 0x24, 0xC0, 0x00, 0x00, 0x00, 0xF3, 0x44, 0x0F, 0x7F, 0x9C, 0x24, 0xB0, 0x00, 0x00,
0x00, 0xF3, 0x44, 0x0F, 0x7F, 0x94, 0x24, 0xA0, 0x00, 0x00, 0x00, 0xF3, 0x44, 0x0F, 0x7F, 0x8C, 0x24, 0x90, 0x00,
0x00, 0x00, 0xF3, 0x44, 0x0F, 0x7F, 0x84, 0x24, 0x80, 0x00, 0x00, 0x00, 0xF3, 0x0F, 0x7F, 0x7C, 0x24, 0x70, 0xF3,
0x0F, 0x7F, 0x74, 0x24, 0x60, 0xF3, 0x0F, 0x7F, 0x6C, 0x24, 0x50, 0xF3, 0x0F, 0x7F, 0x64, 0x24, 0x40, 0xF3, 0x0F,
0x7F, 0x5C, 0x24, 0x30, 0xF3, 0x0F, 0x7F, 0x54, 0x24, 0x20, 0xF3, 0x0F, 0x7F, 0x4C, 0x24, 0x10, 0xF3, 0x0F, 0x7F,
0x04, 0x24, 0x48, 0x8B, 0x8C, 0x24, 0x80, 0x01, 0x00, 0x00, 0x48, 0x83, 0xC1, 0x10, 0x48, 0x89, 0x8C, 0x24, 0x80,
0x01, 0x00, 0x00, 0x48, 0x8D, 0x0C, 0x24, 0x48, 0x89, 0xE3, 0x48, 0x83, 0xEC, 0x30, 0x48, 0x83, 0xE4, 0xF0, 0xFF,
0x15, 0xA8, 0x00, 0x00, 0x00, 0x48, 0x89, 0xDC, 0xF3, 0x0F, 0x6F, 0x04, 0x24, 0xF3, 0x0F, 0x6F, 0x4C, 0x24, 0x10,
0xF3, 0x0F, 0x6F, 0x54, 0x24, 0x20, 0xF3, 0x0F, 0x6F, 0x5C, 0x24, 0x30, 0xF3, 0x0F, 0x6F, 0x64, 0x24, 0x40, 0xF3,
0x0F, 0x6F, 0x6C, 0x24, 0x50, 0xF3, 0x0F, 0x6F, 0x74, 0x24, 0x60, 0xF3, 0x0F, 0x6F, 0x7C, 0x24, 0x70, 0xF3, 0x44,
0x0F, 0x6F, 0x84, 0x24, 0x80, 0x00, 0x00, 0x00, 0xF3, 0x44, 0x0F, 0x6F, 0x8C, 0x24, 0x90, 0x00, 0x00, 0x00, 0xF3,
0x44, 0x0F, 0x6F, 0x94, 0x24, 0xA0, 0x00, 0x00, 0x00, 0xF3, 0x44, 0x0F, 0x6F, 0x9C, 0x24, 0xB0, 0x00, 0x00, 0x00,
0xF3, 0x44, 0x0F, 0x6F, 0xA4, 0x24, 0xC0, 0x00, 0x00, 0x00, 0xF3, 0x44, 0x0F, 0x6F, 0xAC, 0x24, 0xD0, 0x00, 0x00,
0x00, 0xF3, 0x44, 0x0F, 0x6F, 0xB4, 0x24, 0xE0, 0x00, 0x00, 0x00, 0xF3, 0x44, 0x0F, 0x6F, 0xBC, 0x24, 0xF0, 0x00,
0x00, 0x00, 0x48, 0x81, 0xC4, 0x00, 0x01, 0x00, 0x00, 0x9D, 0x41, 0x5F, 0x41, 0x5E, 0x41, 0x5D, 0x41, 0x5C, 0x41,
0x5B, 0x41, 0x5A, 0x41, 0x59, 0x41, 0x58, 0x5F, 0x5E, 0x5A, 0x59, 0x5B, 0x58, 0x5D, 0x48, 0x8D, 0x64, 0x24, 0x08,
0x5C, 0xC3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
#elif SAFETYHOOK_OS_LINUX
constexpr std::array<uint8_t, 391> asm_data = {0xFF, 0x35, 0x79, 0x01, 0x00, 0x00, 0x54, 0x54, 0x55, 0x50, 0x53, 0x51,
0x52, 0x56, 0x57, 0x41, 0x50, 0x41, 0x51, 0x41, 0x52, 0x41, 0x53, 0x41, 0x54, 0x41, 0x55, 0x41, 0x56, 0x41, 0x57,
0x9C, 0x48, 0x81, 0xEC, 0x00, 0x01, 0x00, 0x00, 0xF3, 0x44, 0x0F, 0x7F, 0xBC, 0x24, 0xF0, 0x00, 0x00, 0x00, 0xF3,
0x44, 0x0F, 0x7F, 0xB4, 0x24, 0xE0, 0x00, 0x00, 0x00, 0xF3, 0x44, 0x0F, 0x7F, 0xAC, 0x24, 0xD0, 0x00, 0x00, 0x00,
0xF3, 0x44, 0x0F, 0x7F, 0xA4, 0x24, 0xC0, 0x00, 0x00, 0x00, 0xF3, 0x44, 0x0F, 0x7F, 0x9C, 0x24, 0xB0, 0x00, 0x00,
0x00, 0xF3, 0x44, 0x0F, 0x7F, 0x94, 0x24, 0xA0, 0x00, 0x00, 0x00, 0xF3, 0x44, 0x0F, 0x7F, 0x8C, 0x24, 0x90, 0x00,
0x00, 0x00, 0xF3, 0x44, 0x0F, 0x7F, 0x84, 0x24, 0x80, 0x00, 0x00, 0x00, 0xF3, 0x0F, 0x7F, 0x7C, 0x24, 0x70, 0xF3,
0x0F, 0x7F, 0x74, 0x24, 0x60, 0xF3, 0x0F, 0x7F, 0x6C, 0x24, 0x50, 0xF3, 0x0F, 0x7F, 0x64, 0x24, 0x40, 0xF3, 0x0F,
0x7F, 0x5C, 0x24, 0x30, 0xF3, 0x0F, 0x7F, 0x54, 0x24, 0x20, 0xF3, 0x0F, 0x7F, 0x4C, 0x24, 0x10, 0xF3, 0x0F, 0x7F,
0x04, 0x24, 0x48, 0x8B, 0xBC, 0x24, 0x80, 0x01, 0x00, 0x00, 0x48, 0x83, 0xC7, 0x10, 0x48, 0x89, 0xBC, 0x24, 0x80,
0x01, 0x00, 0x00, 0x48, 0x8D, 0x3C, 0x24, 0x48, 0x89, 0xE3, 0x48, 0x83, 0xEC, 0x30, 0x48, 0x83, 0xE4, 0xF0, 0xFF,
0x15, 0xA8, 0x00, 0x00, 0x00, 0x48, 0x89, 0xDC, 0xF3, 0x0F, 0x6F, 0x04, 0x24, 0xF3, 0x0F, 0x6F, 0x4C, 0x24, 0x10,
0xF3, 0x0F, 0x6F, 0x54, 0x24, 0x20, 0xF3, 0x0F, 0x6F, 0x5C, 0x24, 0x30, 0xF3, 0x0F, 0x6F, 0x64, 0x24, 0x40, 0xF3,
0x0F, 0x6F, 0x6C, 0x24, 0x50, 0xF3, 0x0F, 0x6F, 0x74, 0x24, 0x60, 0xF3, 0x0F, 0x6F, 0x7C, 0x24, 0x70, 0xF3, 0x44,
0x0F, 0x6F, 0x84, 0x24, 0x80, 0x00, 0x00, 0x00, 0xF3, 0x44, 0x0F, 0x6F, 0x8C, 0x24, 0x90, 0x00, 0x00, 0x00, 0xF3,
0x44, 0x0F, 0x6F, 0x94, 0x24, 0xA0, 0x00, 0x00, 0x00, 0xF3, 0x44, 0x0F, 0x6F, 0x9C, 0x24, 0xB0, 0x00, 0x00, 0x00,
0xF3, 0x44, 0x0F, 0x6F, 0xA4, 0x24, 0xC0, 0x00, 0x00, 0x00, 0xF3, 0x44, 0x0F, 0x6F, 0xAC, 0x24, 0xD0, 0x00, 0x00,
0x00, 0xF3, 0x44, 0x0F, 0x6F, 0xB4, 0x24, 0xE0, 0x00, 0x00, 0x00, 0xF3, 0x44, 0x0F, 0x6F, 0xBC, 0x24, 0xF0, 0x00,
0x00, 0x00, 0x48, 0x81, 0xC4, 0x00, 0x01, 0x00, 0x00, 0x9D, 0x41, 0x5F, 0x41, 0x5E, 0x41, 0x5D, 0x41, 0x5C, 0x41,
0x5B, 0x41, 0x5A, 0x41, 0x59, 0x41, 0x58, 0x5F, 0x5E, 0x5A, 0x59, 0x5B, 0x58, 0x5D, 0x48, 0x8D, 0x64, 0x24, 0x08,
0x5C, 0xC3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
#endif
#elif SAFETYHOOK_ARCH_X86_32
constexpr std::array<uint8_t, 171> asm_data = {0xFF, 0x35, 0xA7, 0x00, 0x00, 0x00, 0x54, 0x54, 0x55, 0x50, 0x53, 0x51,
0x52, 0x56, 0x57, 0x9C, 0x81, 0xEC, 0x80, 0x00, 0x00, 0x00, 0xF3, 0x0F, 0x7F, 0x7C, 0x24, 0x70, 0xF3, 0x0F, 0x7F,
0x74, 0x24, 0x60, 0xF3, 0x0F, 0x7F, 0x6C, 0x24, 0x50, 0xF3, 0x0F, 0x7F, 0x64, 0x24, 0x40, 0xF3, 0x0F, 0x7F, 0x5C,
0x24, 0x30, 0xF3, 0x0F, 0x7F, 0x54, 0x24, 0x20, 0xF3, 0x0F, 0x7F, 0x4C, 0x24, 0x10, 0xF3, 0x0F, 0x7F, 0x04, 0x24,
0x8B, 0x8C, 0x24, 0xA0, 0x00, 0x00, 0x00, 0x83, 0xC1, 0x08, 0x89, 0x8C, 0x24, 0xA0, 0x00, 0x00, 0x00, 0x54, 0xFF,
0x15, 0xA3, 0x00, 0x00, 0x00, 0x83, 0xC4, 0x04, 0xF3, 0x0F, 0x6F, 0x04, 0x24, 0xF3, 0x0F, 0x6F, 0x4C, 0x24, 0x10,
0xF3, 0x0F, 0x6F, 0x54, 0x24, 0x20, 0xF3, 0x0F, 0x6F, 0x5C, 0x24, 0x30, 0xF3, 0x0F, 0x6F, 0x64, 0x24, 0x40, 0xF3,
0x0F, 0x6F, 0x6C, 0x24, 0x50, 0xF3, 0x0F, 0x6F, 0x74, 0x24, 0x60, 0xF3, 0x0F, 0x6F, 0x7C, 0x24, 0x70, 0x81, 0xC4,
0x80, 0x00, 0x00, 0x00, 0x9D, 0x5F, 0x5E, 0x5A, 0x59, 0x5B, 0x58, 0x5D, 0x8D, 0x64, 0x24, 0x04, 0x5C, 0xC3, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
#endif

tl::expected<MidHook, MidHook::Error> MidHook::create(void* target, MidHookFn destination, Flags flags) {
    return create(Allocator::global(), target, destination, flags);
}

tl::expected<MidHook, MidHook::Error> MidHook::create(
    const std::shared_ptr<Allocator>& allocator, void* target, MidHookFn destination, Flags flags) {
    MidHook hook{};

    if (const auto setup_result = hook.setup(allocator, reinterpret_cast<uint8_t*>(target), destination);
        !setup_result) {
        return tl::unexpected{setup_result.error()};
    }

    if (!(flags & StartDisabled)) {
        if (auto enable_result = hook.enable(); !enable_result) {
            return tl::unexpected{enable_result.error()};
        }
    }

    return hook;
}
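
// Usage sketch (comment only, assumes MidHookFn and the register Context type from the public
// safetyhook header, where the saved registers are exposed as ctx.rax / ctx.eax and friends):
//
//     void my_mid_destination(safetyhook::Context& ctx) {
//     #if SAFETYHOOK_ARCH_X86_64
//         ctx.rax = 0; // tweak a register before the stolen instructions run
//     #endif
//     }
//
//     auto mid = safetyhook::MidHook::create(some_code_address, my_mid_destination);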

MidHook::MidHook(MidHook&& other) noexcept {
    *this = std::move(other);
}

MidHook& MidHook::operator=(MidHook&& other) noexcept {
    if (this != &other) {
        m_hook = std::move(other.m_hook);
        m_target = other.m_target;
        m_stub = std::move(other.m_stub);
        m_destination = other.m_destination;

        other.m_target = 0;
        other.m_destination = nullptr;
    }

    return *this;
}

void MidHook::reset() {
    *this = {};
}

tl::expected<void, MidHook::Error> MidHook::setup(
    const std::shared_ptr<Allocator>& allocator, uint8_t* target, MidHookFn destination_fn) {
    m_target = target;
    m_destination = destination_fn;

    auto stub_allocation = allocator->allocate(asm_data.size());

    if (!stub_allocation) {
        return tl::unexpected{Error::bad_allocation(stub_allocation.error())};
    }

    m_stub = std::move(*stub_allocation);

    std::copy(asm_data.begin(), asm_data.end(), m_stub.data());

#if SAFETYHOOK_ARCH_X86_64
    store(m_stub.data() + sizeof(asm_data) - 16, m_destination);
#elif SAFETYHOOK_ARCH_X86_32
    store(m_stub.data() + sizeof(asm_data) - 8, m_destination);

    // 32-bit has some relocations we need to fix up as well.
    store(m_stub.data() + 0x02, m_stub.data() + m_stub.size() - 4);
    store(m_stub.data() + 0x59, m_stub.data() + m_stub.size() - 8);
#endif

    auto hook_result = InlineHook::create(allocator, m_target, m_stub.data(), InlineHook::StartDisabled);

    if (!hook_result) {
        m_stub.free();
        return tl::unexpected{Error::bad_inline_hook(hook_result.error())};
    }

    m_hook = std::move(*hook_result);

#if SAFETYHOOK_ARCH_X86_64
    store(m_stub.data() + sizeof(asm_data) - 8, m_hook.trampoline().data());
#elif SAFETYHOOK_ARCH_X86_32
    store(m_stub.data() + sizeof(asm_data) - 4, m_hook.trampoline().data());
#endif

    return {};
}

tl::expected<void, MidHook::Error> MidHook::enable() {
    if (auto enable_result = m_hook.enable(); !enable_result) {
        return tl::unexpected{Error::bad_inline_hook(enable_result.error())};
    }

    return {};
}

tl::expected<void, MidHook::Error> MidHook::disable() {
    if (auto disable_result = m_hook.disable(); !disable_result) {
        return tl::unexpected{Error::bad_inline_hook(disable_result.error())};
    }

    return {};
}
} // namespace safetyhook

//
// Source file: os.linux.cpp
//

#if SAFETYHOOK_OS_LINUX

#include <cstdio>

#include <sys/mman.h>
#include <unistd.h>

namespace safetyhook {
tl::expected<uint8_t*, OsError> vm_allocate(uint8_t* address, size_t size, VmAccess access) {
    int prot = 0;
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;

    if (access == VM_ACCESS_R) {
        prot = PROT_READ;
    } else if (access == VM_ACCESS_RW) {
        prot = PROT_READ | PROT_WRITE;
    } else if (access == VM_ACCESS_RX) {
        prot = PROT_READ | PROT_EXEC;
    } else if (access == VM_ACCESS_RWX) {
        prot = PROT_READ | PROT_WRITE | PROT_EXEC;
    } else {
        return tl::unexpected{OsError::FAILED_TO_ALLOCATE};
    }

    auto* result = mmap(address, size, prot, flags, -1, 0);

    if (result == MAP_FAILED) {
        return tl::unexpected{OsError::FAILED_TO_ALLOCATE};
    }

    return static_cast<uint8_t*>(result);
}

void vm_free(uint8_t* address) {
    munmap(address, 0);
}

tl::expected<uint32_t, OsError> vm_protect(uint8_t* address, size_t size, VmAccess access) {
    int prot = 0;

    if (access == VM_ACCESS_R) {
        prot = PROT_READ;
    } else if (access == VM_ACCESS_RW) {
        prot = PROT_READ | PROT_WRITE;
    } else if (access == VM_ACCESS_RX) {
        prot = PROT_READ | PROT_EXEC;
    } else if (access == VM_ACCESS_RWX) {
        prot = PROT_READ | PROT_WRITE | PROT_EXEC;
    } else {
        return tl::unexpected{OsError::FAILED_TO_PROTECT};
    }

    return vm_protect(address, size, prot);
}

tl::expected<uint32_t, OsError> vm_protect(uint8_t* address, size_t size, uint32_t protect) {
    auto mbi = vm_query(address);

    if (!mbi.has_value()) {
        return tl::unexpected{OsError::FAILED_TO_PROTECT};
    }

    uint32_t old_protect = 0;

    if (mbi->access.read) {
        old_protect |= PROT_READ;
    }

    if (mbi->access.write) {
        old_protect |= PROT_WRITE;
    }

    if (mbi->access.execute) {
        old_protect |= PROT_EXEC;
    }

    auto* addr = align_down(address, static_cast<size_t>(sysconf(_SC_PAGESIZE)));

    if (mprotect(addr, size, static_cast<int>(protect)) == -1) {
        return tl::unexpected{OsError::FAILED_TO_PROTECT};
    }

    return old_protect;
}

tl::expected<VmBasicInfo, OsError> vm_query(uint8_t* address) {
    auto* maps = fopen("/proc/self/maps", "r");

    if (maps == nullptr) {
        return tl::unexpected{OsError::FAILED_TO_QUERY};
    }

    char line[512];
    unsigned long start;
    unsigned long end;
    char perms[5];
    unsigned long offset;
    int dev_major;
    int dev_minor;
    unsigned long inode;
    char path[256];
    unsigned long last_end = reinterpret_cast<unsigned long>(system_info().min_address); // Track the end address of the last mapping.
    auto addr = reinterpret_cast<unsigned long>(address);
    std::optional<VmBasicInfo> info = std::nullopt;

    while (fgets(line, sizeof(line), maps) != nullptr) {
        path[0] = '\0';

        sscanf(line, "%lx-%lx %4s %lx %x:%x %lu %255[^\n]", &start, &end, perms, &offset, &dev_major, &dev_minor,
            &inode, path);

        if (last_end < start && addr >= last_end && addr < start) {
            VmBasicInfo newInfo;
            newInfo.address = reinterpret_cast<uint8_t*>(last_end);
            newInfo.size = start - last_end;
            newInfo.access = VmAccess();
            newInfo.is_free = true;

            info = newInfo;
            break;
        }

        last_end = end;

        if (addr >= start && addr < end) {
            VmBasicInfo newInfo;
            newInfo.address = reinterpret_cast<uint8_t*>(start);
            newInfo.size = end - start;
            // Start with no access; VmAccess() defaults to RWX, which would make the perms checks below meaningless.
            newInfo.access = VmAccess(false, false, false);
            newInfo.is_free = false;

            info = newInfo;

            if (perms[0] == 'r') {
                info->access.read = true;
            }

            if (perms[1] == 'w') {
                info->access.write = true;
            }

            if (perms[2] == 'x') {
                info->access.execute = true;
            }

            break;
        }
    }

    fclose(maps);

    if (!info.has_value()) {
        return tl::unexpected{OsError::FAILED_TO_QUERY};
    }

    return info.value();
}
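
// Parsing example (comment only): a /proc/self/maps line such as
//     7f1c2a400000-7f1c2a600000 r-xp 00000000 08:01 1234567 /usr/lib/libfoo.so
// matches the sscanf format above as start=0x7f1c2a400000, end=0x7f1c2a600000, perms="r-xp",
// offset=0, dev 08:01, inode=1234567, path="/usr/lib/libfoo.so". Gaps between the previous mapping's
// end and the next mapping's start are reported as free regions.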

bool vm_is_readable(uint8_t* address, [[maybe_unused]] size_t size) {
    return vm_query(address).value_or(VmBasicInfo{}).access.read;
}

bool vm_is_writable(uint8_t* address, [[maybe_unused]] size_t size) {
    return vm_query(address).value_or(VmBasicInfo{}).access.write;
}

bool vm_is_executable(uint8_t* address) {
    return vm_query(address).value_or(VmBasicInfo{}).access.execute;
}

SystemInfo system_info() {
    auto page_size = static_cast<uint32_t>(sysconf(_SC_PAGESIZE));

    return {
        .page_size = page_size,
        .allocation_granularity = page_size,
        .min_address = reinterpret_cast<uint8_t*>(0x10000),
        .max_address = reinterpret_cast<uint8_t*>(1ull << 47),
    };
}

void trap_threads([[maybe_unused]] uint8_t* from, [[maybe_unused]] uint8_t* to, [[maybe_unused]] size_t len,
    const std::function<void()>& run_fn) {
    auto from_protect = vm_protect(from, len, VM_ACCESS_RWX).value_or(0);
    auto to_protect = vm_protect(to, len, VM_ACCESS_RWX).value_or(0);
    run_fn();
    vm_protect(to, len, to_protect);
    vm_protect(from, len, from_protect);
}

void fix_ip([[maybe_unused]] ThreadContext ctx, [[maybe_unused]] uint8_t* old_ip, [[maybe_unused]] uint8_t* new_ip) {
}

} // namespace safetyhook

#endif

//
// Source file: allocator.cpp
//

#include <algorithm>
#include <functional>
#include <limits>

namespace safetyhook {
Allocation::Allocation(Allocation&& other) noexcept {
    *this = std::move(other);
}

Allocation& Allocation::operator=(Allocation&& other) noexcept {
    if (this != &other) {
        free();

        m_allocator = std::move(other.m_allocator);
        m_address = other.m_address;
        m_size = other.m_size;

        other.m_address = nullptr;
        other.m_size = 0;
    }

    return *this;
}

Allocation::~Allocation() {
    free();
}

void Allocation::free() {
    if (m_allocator && m_address != nullptr && m_size != 0) {
        m_allocator->free(m_address, m_size);
        m_address = nullptr;
        m_size = 0;
        m_allocator.reset();
    }
}

Allocation::Allocation(std::shared_ptr<Allocator> allocator, uint8_t* address, size_t size) noexcept
    : m_allocator{std::move(allocator)}, m_address{address}, m_size{size} {
}

std::shared_ptr<Allocator> Allocator::global() {
    static std::weak_ptr<Allocator> global_allocator{};
    static std::mutex global_allocator_mutex{};

    std::scoped_lock lock{global_allocator_mutex};

    if (auto allocator = global_allocator.lock()) {
        return allocator;
    }

    auto allocator = Allocator::create();

    global_allocator = allocator;

    return allocator;
}

std::shared_ptr<Allocator> Allocator::create() {
    return std::shared_ptr<Allocator>{new Allocator{}};
}

tl::expected<Allocation, Allocator::Error> Allocator::allocate(size_t size) {
    return allocate_near({}, size, std::numeric_limits<size_t>::max());
}

tl::expected<Allocation, Allocator::Error> Allocator::allocate_near(
    const std::vector<uint8_t*>& desired_addresses, size_t size, size_t max_distance) {
    std::scoped_lock lock{m_mutex};
    return internal_allocate_near(desired_addresses, size, max_distance);
}
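
// Usage sketch (comment only): e9_hook needs its trampoline within +/- 2GB of the target (and of any
// rel32 branch targets it relocates) so the 32-bit displacements stay representable, which is what
// allocate_near provides. `target_address` and `trampoline_size` are placeholder names.
//
//     auto allocation = safetyhook::Allocator::global()->allocate_near(
//         {target_address}, trampoline_size, 0x7FFF'FFFF);
//     // if (allocation) { /* allocation->data() is usable; memory is returned when `allocation` is destroyed */ }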

void Allocator::free(uint8_t* address, size_t size) {
    std::scoped_lock lock{m_mutex};
    return internal_free(address, size);
}

tl::expected<Allocation, Allocator::Error> Allocator::internal_allocate_near(
    const std::vector<uint8_t*>& desired_addresses, size_t size, size_t max_distance) {
    // First search through our list of allocations for a free block that is large
    // enough.
    for (const auto& allocation : m_memory) {
        if (allocation->size < size) {
            continue;
        }

        for (auto node = allocation->freelist.get(); node != nullptr; node = node->next.get()) {
            // Enough room?
            if (static_cast<size_t>(node->end - node->start) < size) {
                continue;
            }

            const auto address = node->start;

            // Close enough?
            if (!in_range(address, desired_addresses, max_distance)) {
                continue;
            }

            node->start += size;

            return Allocation{shared_from_this(), address, size};
        }
    }

    // If we didn't find a free block, we need to allocate a new one.
    auto allocation_size = align_up(size, system_info().allocation_granularity);
    auto allocation_address = allocate_nearby_memory(desired_addresses, allocation_size, max_distance);

    if (!allocation_address) {
        return tl::unexpected{allocation_address.error()};
    }

    auto& allocation = m_memory.emplace_back(new Memory);

    allocation->address = *allocation_address;
    allocation->size = allocation_size;
    allocation->freelist = std::make_unique<FreeNode>();
    allocation->freelist->start = *allocation_address + size;
    allocation->freelist->end = *allocation_address + allocation_size;

    return Allocation{shared_from_this(), *allocation_address, size};
}

void Allocator::internal_free(uint8_t* address, size_t size) {
    for (const auto& allocation : m_memory) {
        if (allocation->address > address || allocation->address + allocation->size < address) {
            continue;
        }

        // Find the right place for our new freenode.
        FreeNode* prev{};

        for (auto node = allocation->freelist.get(); node != nullptr; prev = node, node = node->next.get()) {
            if (node->start > address) {
                break;
            }
        }

        // Add new freenode.
        auto free_node = std::make_unique<FreeNode>();

        free_node->start = address;
        free_node->end = address + size;

        if (prev == nullptr) {
            free_node->next.swap(allocation->freelist);
            allocation->freelist.swap(free_node);
        } else {
            free_node->next.swap(prev->next);
            prev->next.swap(free_node);
        }

        combine_adjacent_freenodes(*allocation);
        break;
    }
}

void Allocator::combine_adjacent_freenodes(Memory& memory) {
    for (auto prev = memory.freelist.get(), node = prev; node != nullptr; node = node->next.get()) {
        if (prev->end == node->start) {
            prev->end = node->end;
            prev->next.swap(node->next);
            node->next.reset();
            node = prev;
        } else {
            prev = node;
        }
    }
}

tl::expected<uint8_t*, Allocator::Error> Allocator::allocate_nearby_memory(
    const std::vector<uint8_t*>& desired_addresses, size_t size, size_t max_distance) {
    if (desired_addresses.empty()) {
        if (auto result = vm_allocate(nullptr, size, VM_ACCESS_RWX)) {
            return result.value();
        }

        return tl::unexpected{Error::BAD_VIRTUAL_ALLOC};
    }

    auto attempt_allocation = [&](uint8_t* p) -> uint8_t* {
        if (!in_range(p, desired_addresses, max_distance)) {
            return nullptr;
        }

        if (auto result = vm_allocate(p, size, VM_ACCESS_RWX)) {
            return result.value();
        }

        return nullptr;
    };

    auto si = system_info();
    auto desired_address = desired_addresses[0];
    auto search_start = si.min_address;
    auto search_end = si.max_address;

    if (static_cast<size_t>(desired_address - search_start) > max_distance) {
        search_start = desired_address - max_distance;
    }

    if (static_cast<size_t>(search_end - desired_address) > max_distance) {
        search_end = desired_address + max_distance;
    }

    search_start = std::max(search_start, si.min_address);
    search_end = std::min(search_end, si.max_address);
    desired_address = align_up(desired_address, si.allocation_granularity);
    VmBasicInfo mbi{};

    // Search backwards from the desired_address.
    for (auto p = desired_address; p > search_start && in_range(p, desired_addresses, max_distance);
        p = align_down(mbi.address - 1, si.allocation_granularity)) {
        auto result = vm_query(p);

        if (!result) {
            break;
        }

        mbi = result.value();

        if (!mbi.is_free) {
            continue;
        }

        if (auto allocation_address = attempt_allocation(p); allocation_address != nullptr) {
            return allocation_address;
        }
    }

    // Search forwards from the desired_address.
    for (auto p = desired_address; p < search_end && in_range(p, desired_addresses, max_distance); p += mbi.size) {
        auto result = vm_query(p);

        if (!result) {
            break;
        }

        mbi = result.value();

        if (!mbi.is_free) {
            continue;
        }

        if (auto allocation_address = attempt_allocation(p); allocation_address != nullptr) {
            return allocation_address;
        }
    }

    return tl::unexpected{Error::NO_MEMORY_IN_RANGE};
}

bool Allocator::in_range(uint8_t* address, const std::vector<uint8_t*>& desired_addresses, size_t max_distance) {
    // The candidate address must be within max_distance of every desired address, not just one of them.
    bool ret = true;

    for (auto desired_address = desired_addresses.begin(); desired_address != desired_addresses.end();
        ++desired_address) {
        uint8_t* value = *desired_address;

        const size_t delta = (address > value) ? address - value : value - address;
        ret &= (delta <= max_distance);
    }

    return ret;
}

Allocator::Memory::~Memory() {
    vm_free(address);
}
} // namespace safetyhook

//
// Source file: os.windows.cpp
//

#include <map>
#include <memory>
#include <mutex>

#if SAFETYHOOK_OS_WINDOWS

#define NOMINMAX
#if __has_include(<Windows.h>)
#include <Windows.h>
#elif __has_include(<windows.h>)
#include <windows.h>
#else
#error "Windows.h not found"
#endif

namespace safetyhook {
tl::expected<uint8_t*, OsError> vm_allocate(uint8_t* address, size_t size, VmAccess access) {
    DWORD protect = 0;

    if (access == VM_ACCESS_R) {
        protect = PAGE_READONLY;
    } else if (access == VM_ACCESS_RW) {
        protect = PAGE_READWRITE;
    } else if (access == VM_ACCESS_RX) {
        protect = PAGE_EXECUTE_READ;
    } else if (access == VM_ACCESS_RWX) {
        protect = PAGE_EXECUTE_READWRITE;
    } else {
        return tl::unexpected{OsError::FAILED_TO_ALLOCATE};
    }

    auto* result = VirtualAlloc(address, size, MEM_COMMIT | MEM_RESERVE, protect);

    if (result == nullptr) {
        return tl::unexpected{OsError::FAILED_TO_ALLOCATE};
    }

    return static_cast<uint8_t*>(result);
}

void vm_free(uint8_t* address) {
    VirtualFree(address, 0, MEM_RELEASE);
}

tl::expected<uint32_t, OsError> vm_protect(uint8_t* address, size_t size, VmAccess access) {
    DWORD protect = 0;

    if (access == VM_ACCESS_R) {
        protect = PAGE_READONLY;
    } else if (access == VM_ACCESS_RW) {
        protect = PAGE_READWRITE;
    } else if (access == VM_ACCESS_RX) {
        protect = PAGE_EXECUTE_READ;
    } else if (access == VM_ACCESS_RWX) {
        protect = PAGE_EXECUTE_READWRITE;
    } else {
        return tl::unexpected{OsError::FAILED_TO_PROTECT};
    }

    return vm_protect(address, size, protect);
}

tl::expected<uint32_t, OsError> vm_protect(uint8_t* address, size_t size, uint32_t protect) {
    DWORD old_protect = 0;

    if (VirtualProtect(address, size, protect, &old_protect) == FALSE) {
        return tl::unexpected{OsError::FAILED_TO_PROTECT};
    }

    return old_protect;
}

tl::expected<VmBasicInfo, OsError> vm_query(uint8_t* address) {
    MEMORY_BASIC_INFORMATION mbi{};
    auto result = VirtualQuery(address, &mbi, sizeof(mbi));

    if (result == 0) {
        return tl::unexpected{OsError::FAILED_TO_QUERY};
    }

    VmAccess access(
        (mbi.Protect & (PAGE_READONLY | PAGE_READWRITE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE)) != 0,
        (mbi.Protect & (PAGE_READWRITE | PAGE_EXECUTE_READWRITE)) != 0,
        (mbi.Protect & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE)) != 0);

    VmBasicInfo retInfo;
    retInfo.address = static_cast<uint8_t*>(mbi.AllocationBase);
    retInfo.size = mbi.RegionSize;
    retInfo.access = access;
    retInfo.is_free = (mbi.State == MEM_FREE);
    return retInfo;
}

bool vm_is_readable(uint8_t* address, size_t size) {
    return IsBadReadPtr(address, size) == FALSE;
}

bool vm_is_writable(uint8_t* address, size_t size) {
    return IsBadWritePtr(address, size) == FALSE;
}

bool vm_is_executable(uint8_t* address) {
    LPVOID image_base_ptr;

    if (RtlPcToFileHeader(address, &image_base_ptr) == nullptr) {
        return vm_query(address).value_or(VmBasicInfo{}).access.execute;
    }

    // Just check if the section is executable.
    const auto* image_base = reinterpret_cast<uint8_t*>(image_base_ptr);
    const auto* dos_hdr = reinterpret_cast<const IMAGE_DOS_HEADER*>(image_base);

    if (dos_hdr->e_magic != IMAGE_DOS_SIGNATURE) {
        return vm_query(address).value_or(VmBasicInfo{}).access.execute;
    }

    const auto* nt_hdr = reinterpret_cast<const IMAGE_NT_HEADERS*>(image_base + dos_hdr->e_lfanew);

    if (nt_hdr->Signature != IMAGE_NT_SIGNATURE) {
        return vm_query(address).value_or(VmBasicInfo{}).access.execute;
    }

    const auto* section = IMAGE_FIRST_SECTION(nt_hdr);

    for (auto i = 0; i < nt_hdr->FileHeader.NumberOfSections; ++i, ++section) {
        if (address >= image_base + section->VirtualAddress &&
            address < image_base + section->VirtualAddress + section->Misc.VirtualSize) {
            return (section->Characteristics & IMAGE_SCN_MEM_EXECUTE) != 0;
        }
    }

    return vm_query(address).value_or(VmBasicInfo{}).access.execute;
}

SystemInfo system_info() {
    SystemInfo info{};

    SYSTEM_INFO si{};
    GetSystemInfo(&si);

    info.page_size = si.dwPageSize;
    info.allocation_granularity = si.dwAllocationGranularity;
    info.min_address = static_cast<uint8_t*>(si.lpMinimumApplicationAddress);
    info.max_address = static_cast<uint8_t*>(si.lpMaximumApplicationAddress);

    return info;
}

struct TrapInfo {
    uint8_t* from_page_start;
    uint8_t* from_page_end;
    uint8_t* from;
    uint8_t* to_page_start;
    uint8_t* to_page_end;
    uint8_t* to;
    size_t len;
};

class TrapManager final {
public:
    static std::mutex mutex;
    static std::unique_ptr<TrapManager> instance;

    TrapManager() { m_trap_veh = AddVectoredExceptionHandler(1, trap_handler); }
    ~TrapManager() {
        if (m_trap_veh != nullptr) {
            RemoveVectoredExceptionHandler(m_trap_veh);
        }
    }

    TrapInfo* find_trap(uint8_t* address) {
        auto search = std::find_if(m_traps.begin(), m_traps.end(), [address](auto& trap) {
            return address >= trap.second.from && address < trap.second.from + trap.second.len;
        });

        if (search == m_traps.end()) {
            return nullptr;
        }

        return &search->second;
    }

    TrapInfo* find_trap_page(uint8_t* address) {
        auto search = std::find_if(m_traps.begin(), m_traps.end(), [address](auto& trap) {
            return address >= trap.second.from_page_start && address < trap.second.from_page_end;
        });

        if (search != m_traps.end()) {
            return &search->second;
        }

        search = std::find_if(m_traps.begin(), m_traps.end(), [address](auto& trap) {
            return address >= trap.second.to_page_start && address < trap.second.to_page_end;
        });

        if (search != m_traps.end()) {
            return &search->second;
        }

        return nullptr;
    }

    void add_trap(uint8_t* from, uint8_t* to, size_t len) {
        TrapInfo info;
        info.from_page_start = align_down(from, 0x1000);
        info.from_page_end = align_up(from + len, 0x1000);
        info.from = from;
        info.to_page_start = align_down(to, 0x1000);
        info.to_page_end = align_up(to + len, 0x1000);
        info.to = to;
        info.len = len;
        m_traps.insert_or_assign(from, info);
    }

private:
    std::map<uint8_t*, TrapInfo> m_traps;
    PVOID m_trap_veh{};

    static LONG CALLBACK trap_handler(PEXCEPTION_POINTERS exp) {
        auto exception_code = exp->ExceptionRecord->ExceptionCode;

        if (exception_code != EXCEPTION_ACCESS_VIOLATION) {
            return EXCEPTION_CONTINUE_SEARCH;
        }

        std::scoped_lock lock{mutex};
        auto* faulting_address = reinterpret_cast<uint8_t*>(exp->ExceptionRecord->ExceptionInformation[1]);
        auto* trap = instance->find_trap(faulting_address);

        if (trap == nullptr) {
            if (instance->find_trap_page(faulting_address) != nullptr) {
                return EXCEPTION_CONTINUE_EXECUTION;
            } else {
                return EXCEPTION_CONTINUE_SEARCH;
            }
        }

        auto* ctx = exp->ContextRecord;

        for (size_t i = 0; i < trap->len; i++) {
            fix_ip(ctx, trap->from + i, trap->to + i);
        }

        return EXCEPTION_CONTINUE_EXECUTION;
    }
};

std::mutex TrapManager::mutex;
std::unique_ptr<TrapManager> TrapManager::instance;

void find_me() {
}

void trap_threads(uint8_t* from, uint8_t* to, size_t len, const std::function<void()>& run_fn) {
    MEMORY_BASIC_INFORMATION find_me_mbi{};
    MEMORY_BASIC_INFORMATION from_mbi{};
    MEMORY_BASIC_INFORMATION to_mbi{};

    VirtualQuery(reinterpret_cast<void*>(find_me), &find_me_mbi, sizeof(find_me_mbi));
    VirtualQuery(from, &from_mbi, sizeof(from_mbi));
    VirtualQuery(to, &to_mbi, sizeof(to_mbi));

    auto new_protect = PAGE_READWRITE;

    if (from_mbi.AllocationBase == find_me_mbi.AllocationBase || to_mbi.AllocationBase == find_me_mbi.AllocationBase) {
        new_protect = PAGE_EXECUTE_READWRITE;
    }

    std::scoped_lock lock{TrapManager::mutex};

    if (TrapManager::instance == nullptr) {
        TrapManager::instance = std::make_unique<TrapManager>();
    }

    TrapManager::instance->add_trap(from, to, len);

    DWORD from_protect;
    DWORD to_protect;

    VirtualProtect(from, len, new_protect, &from_protect);
    VirtualProtect(to, len, new_protect, &to_protect);

    if (run_fn) {
        run_fn();
    }

    VirtualProtect(to, len, to_protect, &to_protect);
    VirtualProtect(from, len, from_protect, &from_protect);
}

void fix_ip(ThreadContext thread_ctx, uint8_t* old_ip, uint8_t* new_ip) {
    auto* ctx = reinterpret_cast<CONTEXT*>(thread_ctx);

#if SAFETYHOOK_ARCH_X86_64
    auto ip = ctx->Rip;
#elif SAFETYHOOK_ARCH_X86_32
    auto ip = ctx->Eip;
#endif

    if (ip == reinterpret_cast<uintptr_t>(old_ip)) {
        ip = reinterpret_cast<uintptr_t>(new_ip);
    }

#if SAFETYHOOK_ARCH_X86_64
    ctx->Rip = ip;
#elif SAFETYHOOK_ARCH_X86_32
    ctx->Eip = ip;
#endif
}

} // namespace safetyhook

#endif

//
// Source file: easy.cpp
//

namespace safetyhook {
InlineHook create_inline(void* target, void* destination, InlineHook::Flags flags) {
    if (auto hook = InlineHook::create(target, destination, flags)) {
        return std::move(*hook);
    } else {
        return {};
    }
}
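
// Usage sketch (comment only): the create_* helpers above return an empty hook object on failure
// instead of an error, which keeps call sites short when detailed error handling isn't needed.
// `target_fn` and `my_detour_fn` are placeholder names, and the flags argument is assumed to be
// defaulted in the public header.
//
//     auto hook = safetyhook::create_inline(reinterpret_cast<void*>(target_fn),
//         reinterpret_cast<void*>(my_detour_fn));
//     // A default-constructed (empty) InlineHook means creation failed.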

MidHook create_mid(void* target, MidHookFn destination, MidHook::Flags flags) {
    if (auto hook = MidHook::create(target, destination, flags)) {
        return std::move(*hook);
    } else {
        return {};
    }
}

VmtHook create_vmt(void* object) {
    if (auto hook = VmtHook::create(object)) {
        return std::move(*hook);
    } else {
        return {};
    }
}
} // namespace safetyhook