#include "AsmHelper64.h"

// NOTE(review): the original include target was stripped during extraction;
// <assert.h> is required for the assert() calls in GenCall and PushArg.
#include <assert.h>

namespace blackbone
{

AsmHelper64::AsmHelper64( )
    : IAsmHelper( asmjit::kArchX64 )
    , _stackEnabled( true )
{
}

AsmHelper64::~AsmHelper64( void )
{
}

/// <summary>
/// Generate function prologue code
/// </summary>
/// <param name="switchMode">true if execution must be switched to x64 mode</param>
void AsmHelper64::GenPrologue( bool switchMode /*= false*/ )
{
    // If switch is required, function was called from x86 mode,
    // so arguments can't be saved in x64 way, because there is no shadow space on the stack
    if (switchMode)
    {
        SwitchTo64();

        // Align stack on 16-byte boundary
        _assembler.and_( _assembler.zsp, 0xFFFFFFFFFFFFFFF0 );
    }
    else
    {
        // Spill the 4 register arguments into the caller-provided shadow space
        _assembler.mov( asmjit::host::qword_ptr( asmjit::host::rsp, 1 * sizeof( uint64_t ) ), asmjit::host::rcx );
        _assembler.mov( asmjit::host::qword_ptr( asmjit::host::rsp, 2 * sizeof( uint64_t ) ), asmjit::host::rdx );
        _assembler.mov( asmjit::host::qword_ptr( asmjit::host::rsp, 3 * sizeof( uint64_t ) ), asmjit::host::r8 );
        _assembler.mov( asmjit::host::qword_ptr( asmjit::host::rsp, 4 * sizeof( uint64_t ) ), asmjit::host::r9 );
    }
}

/// <summary>
/// Generate function epilogue code
/// </summary>
/// <param name="switchMode">true if execution must be switched to x86 mode</param>
/// <param name="retSize">Stack change value (unused on x64 - caller cleans the stack)</param>
void AsmHelper64::GenEpilogue( bool switchMode /*= false*/, int /*retSize = 0*/ )
{
    if (switchMode)
    {
        SwitchTo86();
    }
    else
    {
        // Restore the 4 register arguments from the shadow space
        _assembler.mov( asmjit::host::rcx, asmjit::host::qword_ptr( asmjit::host::rsp, 1 * sizeof( uint64_t ) ) );
        _assembler.mov( asmjit::host::rdx, asmjit::host::qword_ptr( asmjit::host::rsp, 2 * sizeof( uint64_t ) ) );
        _assembler.mov( asmjit::host::r8, asmjit::host::qword_ptr( asmjit::host::rsp, 3 * sizeof( uint64_t ) ) );
        _assembler.mov( asmjit::host::r9, asmjit::host::qword_ptr( asmjit::host::rsp, 4 * sizeof( uint64_t ) ) );
    }

    _assembler.ret();
}

/// <summary>
/// Generate function call
/// </summary>
/// <param name="pFN">Function pointer</param>
/// <param name="args">Function arguments</param>
/// <param name="cc">Ignored (x64 has a single calling convention)</param>
void AsmHelper64::GenCall( const AsmFunctionPtr& pFN, const std::vector<AsmVariant>& args, eCalligConvention /*cc = CC_stdcall*/ )
{
    //
    // reserve stack size (0x28 - minimal size for 4 registers and return address)
    // after call, stack must be aligned on 16 bytes boundary
    //
    size_t rsp_dif = (args.size() > 4) ? args.size() * sizeof( uint64_t ) : 0x28;

    // align on (16 bytes - sizeof(return address))
    rsp_dif = Align( rsp_dif, 0x10 );
    if (_stackEnabled)
        _assembler.sub( asmjit::host::rsp, rsp_dif + 8 );

    // Set args
    for (int32_t i = 0; i < static_cast<int32_t>( args.size() ); i++)
        PushArg( args[i], i );

    if (pFN.type == AsmVariant::imm)
    {
        // Immediate address: load into rax and call indirectly
        _assembler.mov( asmjit::host::rax, pFN.imm_val64 );
        _assembler.call( asmjit::host::rax );
    }
    else if (pFN.type == AsmVariant::reg)
    {
        // Target address already resides in a register
        _assembler.call( pFN.reg_val );
    }
    else
        assert("Invalid function pointer type" && false );

    if (_stackEnabled)
        _assembler.add( asmjit::host::rsp, rsp_dif + 8 );
}

/// <summary>
/// Save rax value and terminate current thread
/// </summary>
/// <param name="pExitThread">NtTerminateThread address</param>
/// <param name="resultPtr">Memory where rax value will be saved</param>
void AsmHelper64::ExitThreadWithStatus( uint64_t pExitThread, uint64_t resultPtr )
{
    // Store the function result (rax) for the caller to pick up
    if (resultPtr != 0)
    {
        _assembler.mov( asmjit::host::rdx, resultPtr );
        _assembler.mov( asmjit::host::dword_ptr( asmjit::host::rdx ), asmjit::host::rax );
    }

    // NtTerminateThread( NULL, rax ) - current thread, exit code = function result
    _assembler.mov( asmjit::host::rdx, asmjit::host::rax );
    _assembler.mov( asmjit::host::rcx, 0 );
    _assembler.mov( asmjit::host::rax, pExitThread );
    _assembler.call( asmjit::host::rax );
}

/// <summary>
/// Save return value and signal thread return event
/// </summary>
/// <param name="pSetEvent">NtSetEvent address</param>
/// <param name="ResultPtr">Result value memory location</param>
/// <param name="EventPtr">Event memory location</param>
/// <param name="lastStatusPtr">Error code memory location</param>
/// <param name="rtype">Return type</param>
void AsmHelper64::SaveRetValAndSignalEvent(
    uint64_t pSetEvent,
    uint64_t ResultPtr,
    uint64_t EventPtr,
    uint64_t lastStatusPtr,
    eReturnType rtype /*= rt_int32*/
    )
{
    _assembler.mov( asmjit::host::rcx, ResultPtr );

    // FPU value has been already saved
    if (rtype == rt_int64 || rtype == rt_int32)
        _assembler.mov( asmjit::host::dword_ptr( asmjit::host::rcx ), asmjit::host::rax );

    // Save last NT status
    // NOTE(review): 0x598 + 0x197 * 8 == 0x1250 - presumably TEB64::LastStatusValue; verify against the TEB layout in use
    _assembler.mov( asmjit::host::rdx, asmjit::host::dword_ptr_abs( 0x30 ).setSegment( asmjit::host::gs ) );     // TEB ptr
    _assembler.add( asmjit::host::rdx, 0x598 + 0x197 * sizeof( uint64_t ) );
    _assembler.mov( asmjit::host::rdx, asmjit::host::dword_ptr( asmjit::host::rdx ) );
    _assembler.mov( asmjit::host::rax, lastStatusPtr );
    _assembler.mov( asmjit::host::dword_ptr( asmjit::host::rax ), asmjit::host::rdx );

    // NtSetEvent(hEvent, NULL)
    _assembler.mov( asmjit::host::rax, EventPtr );
    _assembler.mov( asmjit::host::rcx, asmjit::host::dword_ptr( asmjit::host::rax ) );
    _assembler.mov( asmjit::host::rdx, 0 );
    _assembler.mov( asmjit::host::rax, pSetEvent );
    _assembler.call( asmjit::host::rax );
}

/// <summary>
/// Set stack reservation policy on call generation
/// </summary>
/// <param name="state">
/// If true - stack space will be reserved during each call generation
/// If false - no automatic stack reservation, user must allocate stack by hand
/// </param>
void AsmHelper64::EnableX64CallStack( bool state )
{
    _stackEnabled = state;
}

/// <summary>
/// Push function argument
/// </summary>
/// <param name="arg">Argument</param>
/// <param name="index">Argument index (0-based)</param>
void AsmHelper64::PushArg( const AsmVariant& arg, int32_t index )
{
    switch (arg.type)
    {
    case AsmVariant::imm:
    case AsmVariant::structRet:
        PushArgp( arg.imm_val64, index );
        break;

    case AsmVariant::dataPtr:
    case AsmVariant::dataStruct:
        // Use new_imm_val when available. It's populated by remote call engine.
        PushArgp( arg.new_imm_val != 0 ? arg.new_imm_val : arg.imm_val64, index );
        break;

    case AsmVariant::imm_double:
        PushArgp( arg.getImm_double(), index, true );
        break;

    case AsmVariant::imm_float:
        PushArgp( arg.getImm_float(), index, true );
        break;

    case AsmVariant::mem_ptr:
        // Pass the address of the memory operand
        _assembler.lea( asmjit::host::rax, arg.mem_val );
        PushArgp( asmjit::host::rax, index );
        break;

    case AsmVariant::mem:
        PushArgp( arg.mem_val, index );
        break;

    case AsmVariant::reg:
        PushArgp( arg.reg_val, index );
        break;

    default:
        assert( "Invalid argument type" && false );
        break;
    }
}

/// <summary>
/// Push function argument
/// </summary>
/// <param name="arg">Argument</param>
/// <param name="index">Argument index</param>
/// <param name="fpu">true if argument is a floating point value</param>
template<typename _Type>
void AsmHelper64::PushArgp( const _Type& arg, int32_t index, bool fpu /*= false*/ )
{
    static const asmjit::GpReg regs[] = { asmjit::host::rcx, asmjit::host::rdx, asmjit::host::r8, asmjit::host::r9 };
    static const asmjit::XmmReg xregs[] = { asmjit::host::xmm0, asmjit::host::xmm1, asmjit::host::xmm2, asmjit::host::xmm3 };

    // Pass via register (first 4 arguments)
    if (index < 4)
    {
        // Use XMM register
        if (fpu)
        {
            _assembler.mov( asmjit::host::rax, arg );
            _assembler.movq( xregs[index], asmjit::host::rax );
        }
        else
            _assembler.mov( regs[index], arg );
    }
    // Pass on stack
    else
    {
        _assembler.mov( asmjit::host::rax, arg );
        _assembler.mov( asmjit::host::qword_ptr( asmjit::host::rsp, index * sizeof( uint64_t ) ), asmjit::host::rax );
    }
}

}