diff --git a/Utilities/JIT.h b/Utilities/JIT.h
index 1877b10946..914474d93e 100644
--- a/Utilities/JIT.h
+++ b/Utilities/JIT.h
@@ -211,8 +211,6 @@ namespace asmjit
 		static_cast<void>(args);
 #endif
 	}
-
-	using imm_ptr = Imm;
 }
 
 // Build runtime function with asmjit::X86Assembler
diff --git a/Utilities/Thread.cpp b/Utilities/Thread.cpp
index 34284417eb..a69985de98 100644
--- a/Utilities/Thread.cpp
+++ b/Utilities/Thread.cpp
@@ -2260,17 +2260,17 @@ thread_base::native_entry thread_base::make_trampoline(u64(*entry)(thread_base*
 		c.sub(x86::rsp, 0x20);
 
 		// Call entry point (TODO: support for detached threads missing?)
-		c.call(imm_ptr(entry));
+		c.call(entry);
 
 		// Call finalize, return if zero
 		c.mov(args[0], x86::rax);
-		c.call(imm_ptr(static_cast<native_entry(*)(u64)>(&finalize)));
+		c.call(static_cast<native_entry(*)(u64)>(&finalize));
 		c.test(x86::rax, x86::rax);
 		c.jz(_ret);
 
 		// Otherwise, call it as an entry point with first arg = new current thread
 		c.mov(x86::rbp, x86::rax);
-		c.call(imm_ptr(thread_ctrl::get_current));
+		c.call(thread_ctrl::get_current);
 		c.mov(args[0], x86::rax);
 		c.add(x86::rsp, 0x28);
 		c.jmp(x86::rbp);
diff --git a/rpcs3/Emu/Cell/PPUFunction.cpp b/rpcs3/Emu/Cell/PPUFunction.cpp
index 853a41a49d..750058b5de 100644
--- a/rpcs3/Emu/Cell/PPUFunction.cpp
+++ b/rpcs3/Emu/Cell/PPUFunction.cpp
@@ -1918,7 +1918,7 @@ std::vector<ppu_intrp_func_t>& ppu_function_manager::access(bool ghc)
 			c.mov(args[0], x86::rbp);
 			c.mov(args[2].r32(), x86::dword_ptr(args[0], ::offset32(&ppu_thread::cia)));
 			c.add(args[2], x86::qword_ptr(reinterpret_cast<u64>(&vm::g_base_addr)));
-			c.jmp(imm_ptr(list[0]));
+			c.jmp(list[0]);
 		}),
 		build_function_asm<ppu_intrp_func_t>("ppu_return", [](native_asm& c, auto& args)
 		{
@@ -1928,7 +1928,7 @@ std::vector<ppu_intrp_func_t>& ppu_function_manager::access(bool ghc)
 			c.mov(args[0], x86::rbp);
 			c.mov(args[2].r32(), x86::dword_ptr(args[0], ::offset32(&ppu_thread::cia)));
 			c.add(args[2], x86::qword_ptr(reinterpret_cast<u64>(&vm::g_base_addr)));
-			c.jmp(imm_ptr(list[1]));
+			c.jmp(list[1]);
 		}),
 	};
 #elif defined(ARCH_ARM64)
@@ -1955,7 +1955,7 @@ u32 ppu_function_manager::add_function(ppu_intrp_func_t function)
 		c.mov(args[0], x86::rbp);
 		c.mov(args[2].r32(), x86::dword_ptr(args[0], ::offset32(&ppu_thread::cia)));
 		c.add(args[2], x86::qword_ptr(reinterpret_cast<u64>(&vm::g_base_addr)));
-		c.jmp(imm_ptr(function));
+		c.jmp(function);
 	}));
 #elif defined(ARCH_ARM64)
 	list2.push_back(function);
diff --git a/rpcs3/Emu/Cell/PPUThread.cpp b/rpcs3/Emu/Cell/PPUThread.cpp
index 4e37419f5e..57f425c5e9 100644
--- a/rpcs3/Emu/Cell/PPUThread.cpp
+++ b/rpcs3/Emu/Cell/PPUThread.cpp
@@ -273,7 +273,7 @@ const auto ppu_recompiler_fallback_ghc = build_function_asm
diff --git a/rpcs3/Emu/Cell/SPUASMJITRecompiler.cpp b/rpcs3/Emu/Cell/SPUASMJITRecompiler.cpp
 		c->bind(label_diff);
 		c->inc(SPU_OFF_64(block_failure));
 		c->add(x86::rsp, 0x28);
-		c->jmp(imm_ptr(spu_runtime::tr_dispatch));
+		c->jmp(spu_runtime::tr_dispatch);
 	}
 
 	for (auto&& work : ::as_rvalue(std::move(after)))
@@ -1014,7 +1014,7 @@ void spu_recompiler::branch_fixed(u32 target, bool absolute)
 		c->and_(*addr, 0x3fffc);
 		c->mov(SPU_OFF_32(pc), *addr);
 		c->mov(*arg0, *cpu);
-		c->call(imm_ptr(&check_state));
+		c->call(&check_state);
 		c->jmp(local->second);
 
 	if (absolute)
@@ -1047,7 +1047,7 @@ void spu_recompiler::branch_fixed(u32 target, bool absolute)
 	if (ppptr)
 	{
 		c->add(x86::rsp, 0x28);
-		c->jmp(imm_ptr(ppptr));
+		c->jmp(ppptr);
 	}
 	else
 	{
@@ -1092,7 +1092,7 @@ void spu_recompiler::branch_indirect(spu_opcode_t op, bool jt, bool ret)
 	c->mov(SPU_OFF_32(pc), *addr);
 	c->mov(*arg0, *cpu);
 	c->add(x86::rsp, 0x28);
-	c->jmp(imm_ptr(+_throw));
+	c->jmp(+_throw);
 
 	// Save addr in srr0 and disable interrupts
 	c->bind(intr);
@@ -1159,7 +1159,7 @@ void spu_recompiler::branch_indirect(spu_opcode_t op, bool jt, bool ret)
 	if (ppptr)
 	{
 		c->add(x86::rsp, 0x28);
-		c->jmp(imm_ptr(ppptr));
+		c->jmp(ppptr);
 	}
 	else
 	{
@@ -1230,9 +1230,9 @@ void spu_recompiler::fall(spu_opcode_t op)
 	c->and_(*addr, 0x3fffc);
 	c->mov(SPU_OFF_32(pc), *addr);
 	c->mov(arg1->r32(), op.opcode);
-	c->mov(*qw0, asmjit::imm_ptr(g_fxo->get<spu_interpreter_rt>().decode(op.opcode)));
+	c->mov(*qw0, g_fxo->get<spu_interpreter_rt>().decode(op.opcode));
 	c->mov(*arg0, *cpu);
-	c->call(asmjit::imm_ptr(+gate));
+	c->call(+gate);
 }
 
 void spu_recompiler::UNK(spu_opcode_t op)
@@ -1250,7 +1250,7 @@ void spu_recompiler::UNK(spu_opcode_t op)
 	c->mov(arg1->r32(), op.opcode);
 	c->mov(*arg0, *cpu);
 	c->add(asmjit::x86::rsp, 0x28);
-	c->jmp(asmjit::imm_ptr(+gate));
+	c->jmp(+gate);
 
 	m_pos = -1;
 }
@@ -1278,7 +1278,7 @@ void spu_recompiler::STOP(spu_opcode_t op)
 	c->mov(SPU_OFF_32(pc), *addr);
 	c->mov(arg1->r32(), op.opcode & 0x3fff);
 	c->mov(*arg0, *cpu);
-	c->call(imm_ptr(spu_stop));
+	c->call(spu_stop);
 	c->align(AlignMode::kCode, 16);
 	c->bind(ret);
 
@@ -1366,7 +1366,7 @@ void spu_recompiler::RDCH(spu_opcode_t op)
 		c->mov(SPU_OFF_32(pc), *addr);
 		c->mov(arg1->r32(), +op.ra);
 		c->mov(*arg0, *cpu);
-		c->call(imm_ptr(spu_rdch));
+		c->call(spu_rdch);
 		c->jmp(ret);
 	});
 
@@ -1471,7 +1471,7 @@ void spu_recompiler::RDCH(spu_opcode_t op)
 		c->mov(SPU_OFF_32(pc), *addr);
 		c->lea(*arg1, SPU_OFF_128(gpr, op.rt));
 		c->mov(*arg0, *cpu);
-		c->call(asmjit::imm_ptr(g_cfg.core.spu_loop_detection ? +sub1 : +sub2));
+		c->call(g_cfg.core.spu_loop_detection ? +sub1 : +sub2);
 		return;
 	}
 	case SPU_RdEventMask:
@@ -1508,7 +1508,7 @@ void spu_recompiler::RDCH(spu_opcode_t op)
 		c->mov(SPU_OFF_32(pc), *addr);
 		c->mov(arg1->r32(), +op.ra);
 		c->mov(*arg0, *cpu);
-		c->call(imm_ptr(spu_rdch));
+		c->call(spu_rdch);
 		c->movd(x86::xmm0, *addr);
 		c->pslldq(x86::xmm0, 12);
 		c->movdqa(SPU_OFF_128(gpr, op.rt), x86::xmm0);
@@ -1616,7 +1616,7 @@ void spu_recompiler::RCHCNT(spu_opcode_t op)
 		c->mov(SPU_OFF_32(pc), *addr);
 		c->mov(arg1->r32(), +op.ra);
 		c->mov(*arg0, *cpu);
-		c->call(imm_ptr(spu_rchcnt));
+		c->call(spu_rchcnt);
 		break;
 	}
 	}
@@ -2317,7 +2317,7 @@ void spu_recompiler::WRCH(spu_opcode_t op)
 		c->mov(SPU_OFF_32(pc), *addr);
 		c->mov(arg1->r32(), +op.ra);
 		c->mov(*arg0, *cpu);
-		c->call(imm_ptr(spu_wrch));
+		c->call(spu_wrch);
 		c->jmp(ret);
 	});
 
@@ -2344,7 +2344,7 @@ void spu_recompiler::WRCH(spu_opcode_t op)
 		c->mov(SPU_OFF_32(pc), *addr);
 		c->mov(arg1->r32(), MFC_WrTagMask);
 		c->mov(*arg0, *cpu);
-		c->call(imm_ptr(spu_wrch));
+		c->call(spu_wrch);
 		c->jmp(ret);
 	});
 
@@ -2368,7 +2368,7 @@ void spu_recompiler::WRCH(spu_opcode_t op)
 		c->mov(SPU_OFF_32(pc), *addr);
 		c->mov(arg1->r32(), +op.ra);
 		c->mov(*arg0, *cpu);
-		c->call(imm_ptr(spu_wrch));
+		c->call(spu_wrch);
 		c->jmp(ret);
 
 		c->bind(zero);
@@ -2434,7 +2434,7 @@ void spu_recompiler::WRCH(spu_opcode_t op)
 		c->and_(*addr, 0x3fffc);
 		c->mov(SPU_OFF_32(pc), *addr);
 		c->mov(*arg0, *cpu);
-		c->call(imm_ptr(spu_wrch_mfc));
+		c->call(spu_wrch_mfc);
 		return;
 	}
 	case MFC_WrListStallAck:
@@ -2459,7 +2459,7 @@ void spu_recompiler::WRCH(spu_opcode_t op)
 		c->btr(SPU_OFF_32(ch_stall_mask), arg1->r32());
 		c->jnc(ret);
 		c->mov(*arg0, *cpu);
-		c->call(imm_ptr(+sub));
+		c->call(+sub);
 		c->bind(ret);
 		return;
 	}
@@ -2471,7 +2471,7 @@ void spu_recompiler::WRCH(spu_opcode_t op)
 		};
 
 		c->mov(*arg0, *cpu);
-		c->call(imm_ptr(+sub));
+		c->call(+sub);
 		c->mov(qw0->r32(), SPU_OFF_32(gpr, op.rt, &v128::_u32, 3));
 		c->mov(SPU_OFF_32(ch_dec_value), qw0->r32());
 		return;
@@ -2501,7 +2501,7 @@ void spu_recompiler::WRCH(spu_opcode_t op)
 	c->mov(arg1->r32(), +op.ra);
 	c->mov(qw0->r32(), SPU_OFF_32(gpr, op.rt, &v128::_u32, 3));
 	c->mov(*arg0, *cpu);
-	c->call(imm_ptr(spu_wrch));
+	c->call(spu_wrch);
 }
 
 void spu_recompiler::BIZ(spu_opcode_t op)
@@ -2652,7 +2652,7 @@ void spu_recompiler::BISLED(spu_opcode_t op)
 	asmjit::Label branch_label = c->newLabel();
 
 	c->mov(*arg0, *cpu);
-	c->call(asmjit::imm_ptr(+get_events));
+	c->call(+get_events);
 	c->test(*addr, 1);
 	c->jne(branch_label);
 
@@ -2792,7 +2792,7 @@ void spu_recompiler::ROTQBYBI(spu_opcode_t op)
 	}
 
 	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
-	c->mov(*qw0, asmjit::imm_ptr(+g_spu_imm.rldq_pshufb));
+	c->mov(*qw0, +g_spu_imm.rldq_pshufb);
 	c->mov(*addr, SPU_OFF_32(gpr, op.rb, &v128::_u32, 3));
 	c->and_(*addr, 0xf << 3);
 	c->pshufb(va, asmjit::x86::oword_ptr(*qw0, addr->r64(), 1));
@@ -2807,7 +2807,7 @@ void spu_recompiler::ROTQMBYBI(spu_opcode_t op)
 	}
 
 	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
-	c->mov(*qw0, asmjit::imm_ptr(+g_spu_imm.srdq_pshufb));
+	c->mov(*qw0, +g_spu_imm.srdq_pshufb);
 	c->mov(*addr, SPU_OFF_32(gpr, op.rb, &v128::_u32, 3));
 	c->and_(*addr, 0x1f << 3);
 	c->pshufb(va, asmjit::x86::oword_ptr(*qw0, addr->r64(), 1));
@@ -2822,7 +2822,7 @@ void spu_recompiler::SHLQBYBI(spu_opcode_t op)
 	}
 
 	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
-	c->mov(*qw0, asmjit::imm_ptr(+g_spu_imm.sldq_pshufb));
+	c->mov(*qw0, +g_spu_imm.sldq_pshufb);
 	c->mov(*addr, SPU_OFF_32(gpr, op.rb, &v128::_u32, 3));
 	c->and_(*addr, 0x1f << 3);
 	c->pshufb(va, asmjit::x86::oword_ptr(*qw0, addr->r64(), 1));
@@ -2945,7 +2945,7 @@ void spu_recompiler::ROTQBY(spu_opcode_t op)
 	}
 
 	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
-	c->mov(*qw0, asmjit::imm_ptr(+g_spu_imm.rldq_pshufb));
+	c->mov(*qw0, +g_spu_imm.rldq_pshufb);
 	c->mov(*addr, SPU_OFF_32(gpr, op.rb, &v128::_u32, 3));
 	c->and_(*addr, 0xf);
 	c->shl(*addr, 4);
@@ -2961,7 +2961,7 @@ void spu_recompiler::ROTQMBY(spu_opcode_t op)
 	}
 
 	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
-	c->mov(*qw0, asmjit::imm_ptr(+g_spu_imm.srdq_pshufb));
+	c->mov(*qw0, +g_spu_imm.srdq_pshufb);
 	c->mov(*addr, SPU_OFF_32(gpr, op.rb, &v128::_u32, 3));
 	c->and_(*addr, 0x1f);
 	c->shl(*addr, 4);
@@ -2977,7 +2977,7 @@ void spu_recompiler::SHLQBY(spu_opcode_t op)
 	}
 
 	const XmmLink& va = XmmGet(op.ra, XmmType::Int);
-	c->mov(*qw0, asmjit::imm_ptr(+g_spu_imm.sldq_pshufb));
+	c->mov(*qw0, +g_spu_imm.sldq_pshufb);
 	c->mov(*addr, SPU_OFF_32(gpr, op.rb, &v128::_u32, 3));
 	c->and_(*addr, 0x1f);
 	c->shl(*addr, 4);
diff --git a/rpcs3/Emu/Cell/SPURecompiler.cpp b/rpcs3/Emu/Cell/SPURecompiler.cpp
index a0d75b7cc1..b2e8db8246 100644
--- a/rpcs3/Emu/Cell/SPURecompiler.cpp
+++ b/rpcs3/Emu/Cell/SPURecompiler.cpp
@@ -207,7 +207,7 @@ DECLARE(spu_runtime::g_gateway) = built_function<spu_function_t>("spu_gateway",
 		c.vzeroupper();
 	}
 
-	c.call(asmjit::imm_ptr(spu_runtime::tr_all));
+	c.call(spu_runtime::tr_all);
 
 	if (utils::has_avx())
 	{
diff --git a/rpcs3/Emu/RSX/Common/BufferUtils.cpp b/rpcs3/Emu/RSX/Common/BufferUtils.cpp
index 34c756b160..1593f0d922 100644
--- a/rpcs3/Emu/RSX/Common/BufferUtils.cpp
+++ b/rpcs3/Emu/RSX/Common/BufferUtils.cpp
@@ -269,17 +269,17 @@ namespace
 	{
 		if (utils::has_ssse3())
 		{
-			c.jmp(asmjit::imm_ptr(&copy_data_swap_u32_ssse3<Compare>));
+			c.jmp(&copy_data_swap_u32_ssse3<Compare>);
 			return;
 		}
 
-		c.jmp(asmjit::imm_ptr(&copy_data_swap_u32_naive<Compare>));
+		c.jmp(&copy_data_swap_u32_naive<Compare>);
 	}
 #elif defined(ARCH_ARM64)
 	template <bool Compare>
 	void build_copy_data_swap_u32(native_asm& c, native_args& args)
 	{
-		c.b(asmjit::imm_ptr(&copy_data_swap_u32_naive<Compare>));
+		c.b(&copy_data_swap_u32_naive<Compare>);
 	}
 #endif
 }
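
Note (not part of the patch above): the whole change relies on asmjit's emitter accepting a raw function pointer for call()/jmp(), presumably converting it through asmjit::Imm, which is all the removed imm_ptr alias ever was. A minimal standalone sketch of that pattern follows; do_work and the surrounding JitRuntime/CodeHolder setup are hypothetical and only illustrate the call-target usage, they are not taken from the RPCS3 tree.

// Hypothetical standalone example, assuming asmjit's JitRuntime/CodeHolder/x86::Assembler API.
#include <asmjit/asmjit.h>
#include <cstdio>

static int do_work() { return 42; } // hypothetical call target

int main()
{
	using namespace asmjit;

	JitRuntime rt;
	CodeHolder code;
	code.init(rt.environment());

	x86::Assembler c(&code);

	// Keep the stack 16-byte aligned and leave shadow space for Win64,
	// mirroring the sub/add rsp, 0x28 pattern used in the trampolines above.
	c.sub(x86::rsp, 0x28);

	// Old style: c.call(Imm(reinterpret_cast<uintptr_t>(&do_work)));
	// (RPCS3's imm_ptr was just an alias for asmjit::Imm.)
	// New style, as in this diff: pass the function pointer directly.
	c.call(&do_work);

	c.add(x86::rsp, 0x28);
	c.ret();

	int (*fn)() = nullptr;
	if (rt.add(&fn, &code) != kErrorOk || !fn)
		return 1;

	std::printf("%d\n", fn()); // expected: 42
	return 0;
}

The emitted stub simply forwards to a C++ helper and returns its result, which is the same shape as the trampolines in Thread.cpp and the SPU/PPU dispatch stubs changed by this patch.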