diff --git a/rpcs3/Emu/CPU/CPUThread.cpp b/rpcs3/Emu/CPU/CPUThread.cpp index ba398cad80..51d3028060 100644 --- a/rpcs3/Emu/CPU/CPUThread.cpp +++ b/rpcs3/Emu/CPU/CPUThread.cpp @@ -20,7 +20,6 @@ CPUThread::CPUThread(CPUThreadType type) , m_type(type) , m_stack_size(0) , m_stack_addr(0) - , m_offset(0) , m_prio(0) , m_dec(nullptr) , m_is_step(false) @@ -30,6 +29,7 @@ CPUThread::CPUThread(CPUThreadType type) , m_trace_enabled(false) , m_trace_call_stack(true) { + offset = 0; } CPUThread::~CPUThread() @@ -125,11 +125,9 @@ void CPUThread::Reset() CloseStack(); SetPc(0); - cycle = 0; m_is_branch = false; m_status = Stopped; - m_error = 0; DoReset(); } @@ -202,29 +200,6 @@ void CPUThread::SetPc(const u32 pc) PC = pc; } -void CPUThread::SetError(const u32 error) -{ - if(error == 0) - { - m_error = 0; - } - else - { - m_error |= error; - } -} - -std::vector<std::string> CPUThread::ErrorToString(const u32 error) -{ - std::vector<std::string> earr; - - if(error == 0) return earr; - - earr.push_back("Unknown error"); - - return earr; -} - void CPUThread::Run() { if(!IsStopped()) @@ -322,7 +297,7 @@ void CPUThread::Task() for (uint i = 0; i - NextPc(m_dec->DecodeMemory(PC + m_offset)); + NextPc(m_dec->DecodeMemory(PC + offset)); if (status == CPUThread_Step) { diff --git a/rpcs3/Emu/CPU/CPUThread.h b/rpcs3/Emu/CPU/CPUThread.h index 8731894a25..7085227f84 100644 --- a/rpcs3/Emu/CPU/CPUThread.h +++ b/rpcs3/Emu/CPU/CPUThread.h @@ -26,10 +26,8 @@ class CPUThread : public ThreadBase { protected: u32 m_status; - u32 m_error; u32 m_id; u64 m_prio; - u32 m_offset; CPUThreadType m_type; bool m_joinable; bool m_joining; @@ -61,12 +59,10 @@ public: void SetId(const u32 id); void SetName(const std::string& name); void SetPrio(const u64 prio) { m_prio = prio; } - void SetOffset(const u32 offset) { m_offset = offset; } void SetExitStatus(const u64 status) { m_exit_status = status; } - u32 GetOffset() const { return m_offset; } - u64 GetExitStatus() const { return m_exit_status; } u64 GetPrio() const { return m_prio; } + u64 GetExitStatus() const { return m_exit_status; } std::string GetName() const { return NamedThreadBase::GetThreadName(); } std::string GetFName() const @@ -116,7 +112,7 @@ public: u32 entry; u32 PC; u32 nPC; - u64 cycle; + u32 offset; bool m_is_branch; bool m_trace_enabled; @@ -138,12 +134,6 @@ public: void SetPc(const u32 pc); void SetEntry(const u32 entry); - void SetError(const u32 error); - - static std::vector<std::string> ErrorToString(const u32 error); - std::vector<std::string> ErrorToString() { return ErrorToString(m_error); } - - bool IsOk() const { return m_error == 0; } bool IsRunning() const; bool IsPaused() const; bool IsStopped() const; @@ -153,7 +143,6 @@ public: void SetJoinable(bool joinable) { m_joinable = joinable; } void SetJoining(bool joining) { m_joining = joining; } - u32 GetError() const { return m_error; } u32 GetId() const { return m_id; } CPUThreadType GetType() const { return m_type; } diff --git a/rpcs3/Emu/Cell/MFC.cpp b/rpcs3/Emu/Cell/MFC.cpp index f63341b79c..b79641773b 100644 --- a/rpcs3/Emu/Cell/MFC.cpp +++ b/rpcs3/Emu/Cell/MFC.cpp @@ -1,2 +1,47 @@ #include "stdafx.h" #include "MFC.h" + +const char* get_mfc_cmd_name(u32 cmd) +{ + switch (cmd) + { + case MFC_PUT_CMD: return "PUT"; + case MFC_PUTB_CMD: return "PUTB"; + case MFC_PUTF_CMD: return "PUTF"; + case MFC_PUTS_CMD: return "PUTS"; + case MFC_PUTBS_CMD: return "PUTBS"; + case MFC_PUTFS_CMD: return "PUTFS"; + case MFC_PUTR_CMD: return "PUTR"; + case MFC_PUTRB_CMD: return "PUTRB"; + case MFC_PUTRF_CMD: return "PUTRF"; + case MFC_GET_CMD: return "GET"; + case MFC_GETB_CMD: return
"GETB"; + case MFC_GETF_CMD: return "GETF"; + case MFC_GETS_CMD: return "GETS"; + case MFC_GETBS_CMD: return "GETBS"; + case MFC_GETFS_CMD: return "GETFS"; + case MFC_PUTL_CMD: return "PUTL"; + case MFC_PUTLB_CMD: return "PUTLB"; + case MFC_PUTLF_CMD: return "PUTLF"; + case MFC_PUTRL_CMD: return "PUTRL"; + case MFC_PUTRLB_CMD: return "PUTRLB"; + case MFC_PUTRLF_CMD: return "PUTRLF"; + case MFC_GETL_CMD: return "GETL"; + case MFC_GETLB_CMD: return "GETLB"; + case MFC_GETLF_CMD: return "GETLF"; + + case MFC_GETLLAR_CMD: return "GETLLAR"; + case MFC_PUTLLC_CMD: return "PUTLLC"; + case MFC_PUTLLUC_CMD: return "PUTLLUC"; + case MFC_PUTQLLUC_CMD: return "PUTQLLUC"; + + case MFC_SNDSIG_CMD: return "SNDSIG"; + case MFC_SNDSIGB_CMD: return "SNDSIGB"; + case MFC_SNDSIGF_CMD: return "SNDSIGF"; + case MFC_BARRIER_CMD: return "BARRIER"; + case MFC_EIEIO_CMD: return "EIEIO"; + case MFC_SYNC_CMD: return "SYNC"; + } + + return "UNKNOWN"; +} diff --git a/rpcs3/Emu/Cell/MFC.h b/rpcs3/Emu/Cell/MFC.h index 0b669deb97..8d03c03cdf 100644 --- a/rpcs3/Emu/Cell/MFC.h +++ b/rpcs3/Emu/Cell/MFC.h @@ -1,10 +1,14 @@ #pragma once -enum +const char* get_mfc_cmd_name(u32 cmd); + +enum : u32 { MFC_PUT_CMD = 0x20, MFC_PUTB_CMD = 0x21, MFC_PUTF_CMD = 0x22, + MFC_PUTS_CMD = 0x28, MFC_PUTBS_CMD = 0x29, MFC_PUTFS_CMD = 0x2a, MFC_PUTR_CMD = 0x30, MFC_PUTRB_CMD = 0x31, MFC_PUTRF_CMD = 0x32, MFC_GET_CMD = 0x40, MFC_GETB_CMD = 0x41, MFC_GETF_CMD = 0x42, + MFC_GETS_CMD = 0x48, MFC_GETBS_CMD = 0x49, MFC_GETFS_CMD = 0x4a, MFC_PUTL_CMD = 0x24, MFC_PUTLB_CMD = 0x25, MFC_PUTLF_CMD = 0x26, MFC_PUTRL_CMD = 0x34, MFC_PUTRLB_CMD = 0x35, MFC_PUTRLF_CMD = 0x36, MFC_GETL_CMD = 0x44, MFC_GETLB_CMD = 0x45, MFC_GETLF_CMD = 0x46, @@ -21,52 +25,68 @@ enum MFC_BARRIER_MASK = 0x01, MFC_FENCE_MASK = 0x02, MFC_LIST_MASK = 0x04, - MFC_START_MASK = 0x08, // ??? + MFC_START_MASK = 0x08, MFC_RESULT_MASK = 0x10, // ??? 
- MFC_MASK_CMD = 0xffff, }; // Atomic Status Update -enum +enum : u32 { MFC_PUTLLC_SUCCESS = 0, - MFC_PUTLLC_FAILURE = 1, //reservation was lost + MFC_PUTLLC_FAILURE = 1, // reservation was lost MFC_PUTLLUC_SUCCESS = 2, MFC_GETLLAR_SUCCESS = 4, }; // MFC Write Tag Status Update Request Channel (ch23) operations -enum +enum : u32 { MFC_TAG_UPDATE_IMMEDIATE = 0, MFC_TAG_UPDATE_ANY = 1, MFC_TAG_UPDATE_ALL = 2, }; -enum -{ - MFC_SPU_TO_PPU_MAILBOX_STATUS_MASK = 0x000000FF, - MFC_SPU_TO_PPU_MAILBOX_STATUS_SHIFT = 0x0, - MFC_PPU_TO_SPU_MAILBOX_STATUS_MASK = 0x0000FF00, - MFC_PPU_TO_SPU_MAILBOX_STATUS_SHIFT = 0x8, - MFC_PPU_TO_SPU_MAILBOX_MAX = 0x4, - MFC_SPU_TO_PPU_INT_MAILBOX_STATUS_MASK = 0x00FF0000, - MFC_SPU_TO_PPU_INT_MAILBOX_STATUS_SHIFT = 0x10, -}; - -enum +enum : u32 { MFC_PPU_DMA_CMD_ENQUEUE_SUCCESSFUL = 0x00, MFC_PPU_DMA_CMD_SEQUENCE_ERROR = 0x01, MFC_PPU_DMA_QUEUE_FULL = 0x02, }; -enum +enum : u32 +{ + MFC_PROXY_COMMAND_QUEUE_EMPTY_FLAG = 0x80000000, +}; + +enum : u32 { MFC_PPU_MAX_QUEUE_SPACE = 0x08, MFC_SPU_MAX_QUEUE_SPACE = 0x10, }; -struct DMAC +struct spu_mfc_arg_t { + union + { + u64 ea; + + struct + { + u32 eal; + u32 eah; + }; + }; + + u32 lsa; + + union + { + struct + { + u16 tag; + u16 size; + }; + + u32 size_tag; + }; }; diff --git a/rpcs3/Emu/Cell/RawSPUThread.cpp b/rpcs3/Emu/Cell/RawSPUThread.cpp index 94f34c3fa0..66540fd769 100644 --- a/rpcs3/Emu/Cell/RawSPUThread.cpp +++ b/rpcs3/Emu/Cell/RawSPUThread.cpp @@ -6,6 +6,8 @@ #include "Emu/Cell/RawSPUThread.h" +thread_local spu_mfc_arg_t raw_spu_mfc[8] = {}; + RawSPUThread::RawSPUThread(CPUThreadType type) : SPUThread(type) , MemoryBlock() @@ -19,6 +21,18 @@ RawSPUThread::~RawSPUThread() Memory.CloseRawSPU(this, m_index); } +void RawSPUThread::start() +{ + status.write_relaxed(SPU_STATUS_RUNNING); + + // calling Exec() directly in SIGSEGV handler may cause problems + // (probably because Exec() creates new thread, faults of this thread aren't handled by this handler anymore) + Emu.GetCallbackManager().Async([this](PPUThread& PPU) + { + Exec(); + }); +} + bool RawSPUThread::Read32(const u32 addr, u32* value) { const u32 offset = addr - GetStartAddr() - RAW_SPU_PROB_OFFSET; @@ -27,45 +41,37 @@ bool RawSPUThread::Read32(const u32 addr, u32* value) { case MFC_CMDStatus_offs: { - *value = MFC2.CMDStatus.GetValue(); - break; + *value = MFC_PPU_DMA_CMD_ENQUEUE_SUCCESSFUL; + return true; } case MFC_QStatus_offs: { - // TagStatus is not used: mask is written directly - *value = MFC2.QueryMask.GetValue(); - break; + *value = MFC_PROXY_COMMAND_QUEUE_EMPTY_FLAG | MFC_PPU_MAX_QUEUE_SPACE; + return true; } case SPU_Out_MBox_offs: { - // if Out_MBox is empty, the result is undefined - SPU.Out_MBox.PopUncond(*value); - break; + *value = ch_out_mbox.pop_uncond(); + return true; } case SPU_MBox_Status_offs: { - *value = (SPU.Out_MBox.GetCount() & 0xff) | (SPU.In_MBox.GetFreeCount() << 8) | (SPU.Out_IntrMBox.GetCount() << 16); - break; + *value = (ch_out_mbox.get_count() & 0xff) | ((4 - ch_in_mbox.get_count()) << 8 & 0xff) | (ch_out_intr_mbox.get_count() << 16 & 0xff); + return true; } case SPU_Status_offs: { - *value = SPU.Status.GetValue(); - break; - } - - default: - { - // TODO: read value from LS if necessary (not important) - LOG_ERROR(Log::SPU, "RawSPUThread[%d]: Read32(0x%llx)", m_index, offset); - return false; + *value = status.read_relaxed(); + return true; } } - return true; + LOG_ERROR(Log::SPU, "RawSPUThread[%d]: Read32(): unknown/illegal offset (0x%x)", m_index, offset); + return false; } bool RawSPUThread::Write32(const u32 
addr, const u32 value) @@ -76,129 +82,134 @@ bool RawSPUThread::Write32(const u32 addr, const u32 value) { case MFC_LSA_offs: { - MFC2.LSA.SetValue(value); - break; + if (value >= 0x40000) + { + break; + } + + raw_spu_mfc[m_index].lsa = value; + return true; } case MFC_EAH_offs: { - MFC2.EAH.SetValue(value); - break; + raw_spu_mfc[m_index].eah = value; + return true; } case MFC_EAL_offs: { - MFC2.EAL.SetValue(value); - break; + raw_spu_mfc[m_index].eal = value; + return true; } case MFC_Size_Tag_offs: { - MFC2.Size_Tag.SetValue(value); - break; + if (value >> 16 > 16 * 1024 || (u16)value >= 32) + { + break; + } + + raw_spu_mfc[m_index].size_tag = value; + return true; } - case MFC_CMDStatus_offs: + case MFC_Class_CMD_offs: { - MFC2.CMDStatus.SetValue(value); - EnqMfcCmd(MFC2); - break; + do_dma_transfer(value & ~MFC_START_MASK, raw_spu_mfc[m_index]); + raw_spu_mfc[m_index] = {}; // clear non-persistent data + + if (value & MFC_START_MASK) + { + start(); + } + + return true; } case Prxy_QueryType_offs: { - switch(value) - { - case 2: break; + // 0 - no query requested; cancel previous request + // 1 - set (interrupt) status upon completion of any enabled tag groups + // 2 - set (interrupt) status upon completion of all enabled tag groups - default: + if (value > 2) { - LOG_ERROR(Log::SPU, "RawSPUThread[%d]: Unknown Prxy Query Type. (prxy_query=0x%x)", m_index, value); - return false; - } + break; } - MFC2.QueryType.SetValue(value); // not used - break; + if (value) + { + int2.set(SPU_INT2_STAT_DMA_TAG_GROUP_COMPLETION_INT); // TODO + } + + return true; } case Prxy_QueryMask_offs: { - MFC2.QueryMask.SetValue(value); // TagStatus is not used - break; + //proxy_tag_mask = value; + return true; } case SPU_In_MBox_offs: { - // if In_MBox is already full, the last message is overwritten - SPU.In_MBox.PushUncond(value); - break; + ch_in_mbox.push_uncond(value); + return true; } case SPU_RunCntl_offs: { - if (value == SPU_RUNCNTL_RUNNABLE) + if (value == SPU_RUNCNTL_RUN_REQUEST) { - // calling Exec() directly in SIGSEGV handler may cause problems - // (probably because Exec() creates new thread, faults of this thread aren't handled by this handler anymore) - Emu.GetCallbackManager().Async([this](PPUThread& PPU) - { - SPU.Status.SetValue(SPU_STATUS_RUNNING); - Exec(); - }); - + start(); } - else if (value == SPU_RUNCNTL_STOP) + else if (value == SPU_RUNCNTL_STOP_REQUEST) { - SPU.Status.SetValue(SPU_STATUS_STOPPED); + status &= ~SPU_STATUS_RUNNING; Stop(); } else { - LOG_ERROR(Log::SPU, "RawSPUThread[%d]: Write32(SPU_RunCtrl, 0x%x): unknown value", m_index, value); - return false; + break; } - break; + + run_ctrl.write_relaxed(value); + return true; } case SPU_NPC_offs: { - if (value & 3) + // check if interrupts are enabled + if ((value & 3) != 1 || value >= 0x40000) { - // least significant bit contains some interrupt flag - LOG_ERROR(Log::SPU, "RawSPUThread[%d]: Write32(SPU_NPC_offs, 0x%x): lowest bits set", m_index, value); - return false; + break; } - SPU.NPC.SetValue(value); - break; + + npc.write_relaxed(value); + return true; } case SPU_RdSigNotify1_offs: { - WriteSNR(0, value); - break; + write_snr(0, value); + return true; } case SPU_RdSigNotify2_offs: { - WriteSNR(1, value); - break; - } - - default: - { - // TODO: write value to LS if necessary (not important) - LOG_ERROR(Log::SPU, "RawSPUThread[%d]: Write32(0x%llx, 0x%x)", m_index, offset, value); - return false; + write_snr(1, value); + return true; } } - return true; + LOG_ERROR(SPU, "RawSPUThread[%d]: Write32(value=0x%x): 
unknown/illegal offset (0x%x)", m_index, value, offset); + return false; } void RawSPUThread::InitRegs() { - ls_offset = m_offset = GetStartAddr() + RAW_SPU_LS_OFFSET; + offset = GetStartAddr() + RAW_SPU_LS_OFFSET; SPUThread::InitRegs(); } @@ -209,9 +220,9 @@ u32 RawSPUThread::GetIndex() const void RawSPUThread::Task() { - PC = SPU.NPC.GetValue(); + PC = npc.exchange(0) & ~3; SPUThread::Task(); - SPU.NPC.SetValue(PC); + npc.write_relaxed(PC | 1); } diff --git a/rpcs3/Emu/Cell/RawSPUThread.h b/rpcs3/Emu/Cell/RawSPUThread.h index 7c166c6468..ff65632bc4 100644 --- a/rpcs3/Emu/Cell/RawSPUThread.h +++ b/rpcs3/Emu/Cell/RawSPUThread.h @@ -16,6 +16,8 @@ public: RawSPUThread(CPUThreadType type = CPU_THREAD_RAW_SPU); virtual ~RawSPUThread(); + void start(); + bool Read32(const u32 addr, u32* value); bool Write32(const u32 addr, const u32 value); diff --git a/rpcs3/Emu/Cell/SPUContext.h b/rpcs3/Emu/Cell/SPUContext.h new file mode 100644 index 0000000000..93a2464038 --- /dev/null +++ b/rpcs3/Emu/Cell/SPUContext.h @@ -0,0 +1,10 @@ +#pragma once + +class SPUThread; + +struct SPUContext +{ + u128 gpr[128]; + + SPUThread& thread; +}; diff --git a/rpcs3/Emu/Cell/SPUInterpreter.h b/rpcs3/Emu/Cell/SPUInterpreter.h index 774478c1d2..f17a292f6f 100644 --- a/rpcs3/Emu/Cell/SPUInterpreter.h +++ b/rpcs3/Emu/Cell/SPUInterpreter.h @@ -94,7 +94,7 @@ private: //0 - 10 void STOP(u32 code) { - CPU.StopAndSignal(code); + CPU.stop_and_signal(code); LOG2_OPCODE(); } void LNOP() @@ -116,12 +116,11 @@ private: } void RDCH(u32 rt, u32 ra) { - CPU.ReadChannel(CPU.GPR[rt], ra); + CPU.GPR[rt]._u32[3] = CPU.get_ch_value(ra); } void RCHCNT(u32 rt, u32 ra) { - CPU.GPR[rt].clear(); - CPU.GPR[rt]._u32[3] = CPU.GetChannelCount(ra); + CPU.GPR[rt]._u32[3] = CPU.get_ch_count(ra); } void SF(u32 rt, u32 ra, u32 rb) { @@ -312,7 +311,7 @@ private: } void WRCH(u32 ra, u32 rt) { - CPU.WriteChannel(ra, CPU.GPR[rt]); + CPU.set_ch_value(ra, CPU.GPR[rt]._u32[3]); } void BIZ(u32 intr, u32 rt, u32 ra) { @@ -406,7 +405,7 @@ private: { u32 lsa = (CPU.GPR[ra]._u32[3] + CPU.GPR[rb]._u32[3]) & 0x3fff0; - CPU.WriteLS128(lsa, CPU.GPR[rt]); + CPU.write128(lsa, CPU.GPR[rt]); } void BI(u32 intr, u32 ra) { @@ -536,7 +535,7 @@ private: { u32 lsa = (CPU.GPR[ra]._u32[3] + CPU.GPR[rb]._u32[3]) & 0x3fff0; - CPU.GPR[rt] = CPU.ReadLS128(lsa); + CPU.GPR[rt] = CPU.read128(lsa); } void ROTQBYBI(u32 rt, u32 ra, u32 rb) { @@ -864,8 +863,7 @@ private: { if (CPU.GPR[ra]._s32[3] > CPU.GPR[rb]._s32[3]) { - CPU.SPU.Status.SetValue(SPU_STATUS_STOPPED_BY_HALT); - CPU.Stop(); + CPU.halt(); } } void CLZ(u32 rt, u32 ra) @@ -1199,8 +1197,7 @@ private: { if (CPU.GPR[ra]._u32[3] > CPU.GPR[rb]._u32[3]) { - CPU.SPU.Status.SetValue(SPU_STATUS_STOPPED_BY_HALT); - CPU.Stop(); + CPU.halt(); } } void DFMA(u32 rt, u32 ra, u32 rb, bool neg, bool sub) @@ -1453,8 +1450,7 @@ private: { if (CPU.GPR[ra]._s32[3] == CPU.GPR[rb]._s32[3]) { - CPU.SPU.Status.SetValue(SPU_STATUS_STOPPED_BY_HALT); - CPU.Stop(); + CPU.halt(); } } @@ -1564,7 +1560,7 @@ private: { u32 lsa = (i16 << 2) & 0x3fff0; - CPU.WriteLS128(lsa, CPU.GPR[rt]); + CPU.write128(lsa, CPU.GPR[rt]); } void BRNZ(u32 rt, s32 i16) { @@ -1609,7 +1605,7 @@ private: { u32 lsa = branchTarget(CPU.PC, i16) & 0x3fff0; - CPU.WriteLS128(lsa, CPU.GPR[rt]); + CPU.write128(lsa, CPU.GPR[rt]); } void BRA(s32 i16) { @@ -1621,7 +1617,7 @@ private: { u32 lsa = (i16 << 2) & 0x3fff0; - CPU.GPR[rt] = CPU.ReadLS128(lsa); + CPU.GPR[rt] = CPU.read128(lsa); } void BRASL(u32 rt, s32 i16) { @@ -1665,7 +1661,7 @@ private: { u32 lsa = branchTarget(CPU.PC, i16) & 0x3fff0; - 
CPU.GPR[rt] = CPU.ReadLS128(lsa); + CPU.GPR[rt] = CPU.read128(lsa); } void IL(u32 rt, s32 i16) { @@ -1748,13 +1744,13 @@ private: { const u32 lsa = (CPU.GPR[ra]._s32[3] + i10) & 0x3fff0; - CPU.WriteLS128(lsa, CPU.GPR[rt]); + CPU.write128(lsa, CPU.GPR[rt]); } void LQD(u32 rt, s32 i10, u32 ra) //i10 is shifted left by 4 while decoding { const u32 lsa = (CPU.GPR[ra]._s32[3] + i10) & 0x3fff0; - CPU.GPR[rt] = CPU.ReadLS128(lsa); + CPU.GPR[rt] = CPU.read128(lsa); } void XORI(u32 rt, u32 ra, s32 i10) { @@ -1790,8 +1786,7 @@ private: { if (CPU.GPR[ra]._s32[3] > i10) { - CPU.SPU.Status.SetValue(SPU_STATUS_STOPPED_BY_HALT); - CPU.Stop(); + CPU.halt(); } } void CLGTI(u32 rt, u32 ra, s32 i10) @@ -1817,8 +1812,7 @@ private: { if (CPU.GPR[ra]._u32[3] > (u32)i10) { - CPU.SPU.Status.SetValue(SPU_STATUS_STOPPED_BY_HALT); - CPU.Stop(); + CPU.halt(); } } void MPYI(u32 rt, u32 ra, s32 i10) @@ -1850,8 +1844,7 @@ private: { if (CPU.GPR[ra]._s32[3] == i10) { - CPU.SPU.Status.SetValue(SPU_STATUS_STOPPED_BY_HALT); - CPU.Stop(); + CPU.halt(); } } diff --git a/rpcs3/Emu/Cell/SPURecompilerCore.cpp b/rpcs3/Emu/Cell/SPURecompilerCore.cpp index a829019603..e476f80eb3 100644 --- a/rpcs3/Emu/Cell/SPURecompilerCore.cpp +++ b/rpcs3/Emu/Cell/SPURecompilerCore.cpp @@ -49,7 +49,7 @@ void SPURecompilerCore::Compile(u16 pos) u64 time0 = 0; SPUDisAsm dis_asm(CPUDisAsm_InterpreterMode); - dis_asm.offset = vm::get_ptr(CPU.ls_offset); + dis_asm.offset = vm::get_ptr(CPU.offset); StringLogger stringLogger; stringLogger.setOption(kLoggerOptionBinaryForm, true); @@ -103,7 +103,7 @@ void SPURecompilerCore::Compile(u16 pos) while (true) { - const u32 opcode = vm::read32(CPU.ls_offset + pos * 4); + const u32 opcode = vm::read32(CPU.offset + pos * 4); m_enc->do_finalize = false; if (opcode) { @@ -182,8 +182,8 @@ void SPURecompilerCore::Compile(u16 pos) u32 SPURecompilerCore::DecodeMemory(const u32 address) { - assert(CPU.ls_offset == address - CPU.PC); - const u32 m_offset = CPU.ls_offset; + assert(CPU.offset == address - CPU.PC); + const u32 m_offset = CPU.offset; const u16 pos = (u16)(CPU.PC >> 2); //ConLog.Write("DecodeMemory: pos=%d", pos); @@ -268,8 +268,7 @@ u32 SPURecompilerCore::DecodeMemory(const u32 address) if (res & 0x1000000) { - CPU.SPU.Status.SetValue(SPU_STATUS_STOPPED_BY_HALT); - CPU.Stop(); + CPU.halt(); res &= ~0x1000000; } diff --git a/rpcs3/Emu/Cell/SPUThread.cpp b/rpcs3/Emu/Cell/SPUThread.cpp index 0f044d4994..33ee856010 100644 --- a/rpcs3/Emu/Cell/SPUThread.cpp +++ b/rpcs3/Emu/Cell/SPUThread.cpp @@ -37,12 +37,6 @@ SPUThread::SPUThread(CPUThreadType type) : CPUThread(type) { assert(type == CPU_THREAD_SPU || type == CPU_THREAD_RAW_SPU); - group = nullptr; - for (auto& p : SPUPs) - { - p.reset(new EventPort()); - } - Reset(); } @@ -66,43 +60,57 @@ void SPUThread::Task() if (std::fegetround() != FE_TOWARDZERO) { - LOG_ERROR(Log::SPU, "Rounding mode has changed(%d)", std::fegetround()); + LOG_ERROR(SPU, "Rounding mode has changed(%d)", std::fegetround()); } std::fesetround(round); } void SPUThread::DoReset() { - //reset regs - memset(GPR, 0, sizeof(GPR)); } void SPUThread::InitRegs() { + memset(GPR, 0, sizeof(GPR)); + FPSCR.Reset(); + + ch_mfc_args = {}; + mfc_queue.clear(); + + ch_tag_mask = 0; + ch_tag_stat.data = {}; + ch_stall_stat.data = {}; + ch_atomic_stat.data = {}; + + ch_in_mbox.clear(); + + ch_out_mbox.data = {}; + ch_out_intr_mbox.data = {}; + + snr_config = 0; + + ch_snr1.data = {}; + ch_snr2.data = {}; + + ch_event_mask = 0; + ch_event_stat.write_relaxed(0); + + ch_dec_start_timestamp = get_time(); // ??? 
+ ch_dec_value = 0; + + run_ctrl.write_relaxed(0); + status.write_relaxed(0); + + int0.clear(); + int2.clear(); + GPR[1]._u32[3] = 0x3FFF0; // initial stack frame pointer - - cfg.Reset(); - - ls_offset = m_offset; - - SPU.Status.SetValue(SPU_STATUS_STOPPED); - - // TODO: check initialization if necessary - MFC2.QueryType.SetValue(0); // prxy - MFC1.CMDStatus.SetValue(0); - MFC2.CMDStatus.SetValue(0); - MFC1.TagStatus.SetValue(0); - MFC2.TagStatus.SetValue(0); - //PC = SPU.NPC.GetValue(); - - m_event_mask = 0; - m_events = 0; } void SPUThread::InitStack() { - m_stack_size = 0x1000; // this value is wrong - m_stack_addr = m_offset + 0x40000 - m_stack_size; // stack is the part of SPU Local Storage + m_stack_size = 0x2000; // this value is wrong + m_stack_addr = offset + 0x40000 - m_stack_size; // stack is the part of SPU Local Storage } void SPUThread::CloseStack() @@ -122,7 +130,7 @@ void SPUThread::DoRun() break; default: - LOG_ERROR(Log::SPU, "Invalid SPU decoder mode: %d", Ini.SPUDecoderMode.GetValue()); + LOG_ERROR(SPU, "Invalid SPU decoder mode: %d", Ini.SPUDecoderMode.GetValue()); Emu.Pause(); } } @@ -143,27 +151,12 @@ void SPUThread::DoStop() void SPUThread::DoClose() { - // disconnect all event ports - if (Emu.IsStopped()) - { - return; - } - for (u32 i = 0; i < 64; i++) - { - std::shared_ptr port = SPUPs[i]; - std::lock_guard lock(port->m_mutex); - if (port->eq) - { - port->eq->ports.remove(port); - port->eq = nullptr; - } - } } void SPUThread::FastCall(u32 ls_addr) { // can't be called from another thread (because it doesn't make sense) - WriteLS32(0x0, 2); + write32(0x0, 2); auto old_PC = PC; auto old_LR = GPR[0]._u32[3]; @@ -185,336 +178,412 @@ void SPUThread::FastStop() m_status = Stopped; } -void SPUThread::WriteSNR(bool number, u32 value) -{ - if (cfg.value & ((u64)1 << (u64)number)) - { - SPU.SNR[number ? 1 : 0].PushUncond_OR(value); // logical OR - } - else - { - SPU.SNR[number ? 
1 : 0].PushUncond(value); // overwrite - } -} - -#define LOG_DMAC(type, text) type(Log::SPU, "DMAC::ProcessCmd(cmd=0x%x, tag=0x%x, lsa=0x%x, ea=0x%llx, size=0x%x): " text, cmd, tag, lsa, ea, size) - -void SPUThread::ProcessCmd(u32 cmd, u32 tag, u32 lsa, u64 ea, u32 size) +void SPUThread::do_dma_transfer(u32 cmd, spu_mfc_arg_t args) { if (cmd & (MFC_BARRIER_MASK | MFC_FENCE_MASK)) { _mm_mfence(); } - u32 eal = vm::cast(ea, "ea"); + u32 eal = vm::cast(args.ea, "ea"); - if (eal >= SYS_SPU_THREAD_BASE_LOW && group) // SPU Thread Group MMIO (LS and SNR) + if (eal >= SYS_SPU_THREAD_BASE_LOW && tg_id && m_type == CPU_THREAD_SPU) // SPU Thread Group MMIO (LS and SNR) { const u32 num = (eal & SYS_SPU_THREAD_BASE_MASK) / SYS_SPU_THREAD_OFFSET; // thread number in group const u32 offset = (eal & SYS_SPU_THREAD_BASE_MASK) % SYS_SPU_THREAD_OFFSET; // LS offset or MMIO register std::shared_ptr<CPUThread> t; + std::shared_ptr<SpuGroupInfo> tg; - if (num < group->list.size() && group->list[num] && (t = Emu.GetCPU().GetThread(group->list[num])) && t->GetType() == CPU_THREAD_SPU) + if (Emu.GetIdManager().GetIDData(tg_id, tg) && num < tg->list.size() && tg->list[num] && (t = Emu.GetCPU().GetThread(tg->list[num])) && t->GetType() == CPU_THREAD_SPU) { SPUThread& spu = static_cast<SPUThread&>(*t); - if (offset + size - 1 < 0x40000) // LS access + if (offset + args.size - 1 < 0x40000) // LS access { - eal = spu.ls_offset + offset; // redirect access + eal = spu.offset + offset; // redirect access } - else if ((cmd & MFC_PUT_CMD) && size == 4 && (offset == SYS_SPU_THREAD_SNR1 || offset == SYS_SPU_THREAD_SNR2)) + else if ((cmd & MFC_PUT_CMD) && args.size == 4 && (offset == SYS_SPU_THREAD_SNR1 || offset == SYS_SPU_THREAD_SNR2)) { - spu.WriteSNR(SYS_SPU_THREAD_SNR2 == offset, vm::read32(ls_offset + lsa)); + spu.write_snr(SYS_SPU_THREAD_SNR2 == offset, vm::read32(offset + args.lsa)); return; } else { - LOG_DMAC(LOG_ERROR, "Invalid offset (SPU Thread Group MMIO)"); + LOG_ERROR(SPU, "do_dma_transfer(cmd=0x%x, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x): invalid MMIO offset", cmd, args.lsa, args.ea, args.tag, args.size); + throw ""; } } else { - LOG_DMAC(LOG_ERROR, "Invalid thread (SPU Thread Group MMIO)"); + LOG_ERROR(SPU, "do_dma_transfer(cmd=0x%x, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x): invalid thread type", cmd, args.lsa, args.ea, args.tag, args.size); + throw ""; } } - switch (cmd & ~(MFC_BARRIER_MASK | MFC_FENCE_MASK | MFC_LIST_MASK | MFC_RESULT_MASK)) + switch (cmd & ~(MFC_BARRIER_MASK | MFC_FENCE_MASK)) { case MFC_PUT_CMD: + case MFC_PUTR_CMD: { - memcpy(vm::get_ptr<void>(eal), vm::get_ptr<void>(ls_offset + lsa), size); + memcpy(vm::get_ptr<void>(eal), vm::get_ptr<void>(offset + args.lsa), args.size); return; } case MFC_GET_CMD: { - memcpy(vm::get_ptr<void>(ls_offset + lsa), vm::get_ptr<void>(eal), size); + memcpy(vm::get_ptr<void>(offset + args.lsa), vm::get_ptr<void>(eal), args.size); return; } + } - default: - { - LOG_DMAC(LOG_ERROR, "Unknown DMA command"); - Emu.Pause(); - return; - } - } + LOG_ERROR(SPU, "do_dma_transfer(cmd=0x%x, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x): invalid cmd (%s)", cmd, args.lsa, args.ea, args.tag, args.size, get_mfc_cmd_name(cmd)); + throw ""; } -#undef LOG_DMAC - -void SPUThread::ListCmd(u32 lsa, u64 ea, u16 tag, u16 size, u32 cmd, MFCReg& MFCArgs) +void SPUThread::do_dma_list_cmd(u32 cmd, spu_mfc_arg_t args) { - const u32 list_addr = ea & 0x3ffff; - const u32 list_size = size / 8; - lsa &= 0x3fff0; + if (!(cmd & MFC_LIST_MASK)) + { + LOG_ERROR(SPU, "do_dma_list_cmd(cmd=0x%x, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x): invalid cmd (%s)", cmd, args.lsa, args.ea,
args.tag, args.size, get_mfc_cmd_name(cmd)); + throw ""; + } + + const u32 list_addr = args.ea & 0x3ffff; + const u32 list_size = args.size / 8; + args.lsa &= 0x3fff0; struct list_element { - be_t s; // Stall-and-Notify bit (0x8000) + be_t sb; // Stall-and-Notify bit (0x8000) be_t ts; // List Transfer Size be_t ea; // External Address Low }; - u32 result = MFC_PPU_DMA_CMD_ENQUEUE_SUCCESSFUL; - for (u32 i = 0; i < list_size; i++) { - auto rec = vm::ptr::make(ls_offset + list_addr + i * 8); + auto rec = vm::ptr::make(offset + list_addr + i * 8); const u32 size = rec->ts; - if (!(rec->s.data() & se16(0x8000)) && size < 16 && size != 1 && size != 2 && size != 4 && size != 8) - { - LOG_ERROR(Log::SPU, "DMA List: invalid transfer size(%d)", size); - result = MFC_PPU_DMA_CMD_SEQUENCE_ERROR; - break; - } - const u32 addr = rec->ea; - if (size) - { - ProcessCmd(cmd, tag, lsa | (addr & 0xf), addr, size); - } - - if (Ini.HLELogging.GetValue() || rec->s.data()) - { - LOG_NOTICE(Log::SPU, "*** list element(%d/%d): s=0x%x, ts=0x%x, eal=0x%x (lsa=0x%x)", i, list_size, rec->s, rec->ts, rec->ea, lsa | (addr & 0xf)); - } if (size) { - lsa += std::max(size, 16); + spu_mfc_arg_t transfer; + transfer.ea = addr; + transfer.lsa = args.lsa | (addr & 0xf); + transfer.tag = args.tag; + transfer.size = size; + + do_dma_transfer(cmd & ~MFC_LIST_MASK, transfer); + + args.lsa += std::max(size, 16); } - if (rec->s.data() & se16(0x8000)) + if (rec->sb.data() & se16(0x8000)) { - StallStat.PushUncond_OR(1 << tag); + ch_stall_stat.push_logical_or(1 << args.tag); - if (StallList[tag].MFCArgs) - { - LOG_ERROR(Log::SPU, "DMA List: existing stalled list found (tag=%d)", tag); - result = MFC_PPU_DMA_CMD_SEQUENCE_ERROR; - break; - } + spu_mfc_arg_t stalled; + stalled.ea = (args.ea & ~0xffffffff) | (list_addr + (i + 1) * 8); + stalled.lsa = args.lsa; + stalled.tag = args.tag; + stalled.size = (list_size - i - 1) * 8; - StallList[tag].MFCArgs = &MFCArgs; - StallList[tag].cmd = cmd; - StallList[tag].ea = (ea & ~0xffffffff) | (list_addr + (i + 1) * 8); - StallList[tag].lsa = lsa; - StallList[tag].size = (list_size - i - 1) * 8; - break; + mfc_queue.emplace_back(cmd, stalled); + return; } } - - MFCArgs.CMDStatus.SetValue(result); } -void SPUThread::EnqMfcCmd(MFCReg& MFCArgs) +void SPUThread::process_mfc_cmd(u32 cmd) { - u32 cmd = MFCArgs.CMDStatus.GetValue(); - u16 op = cmd & MFC_MASK_CMD; + if (Ini.HLELogging.GetValue()) + { + LOG_NOTICE(SPU, "DMA %s: cmd=0x%x, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x", get_mfc_cmd_name(cmd), ch_mfc_args.lsa, ch_mfc_args.ea, ch_mfc_args.tag, ch_mfc_args.size, cmd); + } - u32 lsa = MFCArgs.LSA.GetValue(); - u64 ea = (u64)MFCArgs.EAL.GetValue() | ((u64)MFCArgs.EAH.GetValue() << 32); - u32 size_tag = MFCArgs.Size_Tag.GetValue(); - u16 tag = (u16)size_tag; - u16 size = size_tag >> 16; - - switch (op & ~(MFC_BARRIER_MASK | MFC_FENCE_MASK)) + switch (cmd) { case MFC_PUT_CMD: - case MFC_PUTR_CMD: // ??? + case MFC_PUTB_CMD: + case MFC_PUTF_CMD: + case MFC_PUTR_CMD: + case MFC_PUTRB_CMD: + case MFC_PUTRF_CMD: case MFC_GET_CMD: + case MFC_GETB_CMD: + case MFC_GETF_CMD: { - if (Ini.HLELogging.GetValue()) LOG_NOTICE(Log::SPU, "DMA %s%s%s%s: lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x, cmd=0x%x", - (op & MFC_PUT_CMD ? "PUT" : "GET"), - (op & MFC_RESULT_MASK ? "R" : ""), - (op & MFC_BARRIER_MASK ? "B" : ""), - (op & MFC_FENCE_MASK ? 
"F" : ""), - lsa, ea, tag, size, cmd); - - ProcessCmd(cmd, tag, lsa, ea, size); - MFCArgs.CMDStatus.SetValue(MFC_PPU_DMA_CMD_ENQUEUE_SUCCESSFUL); - break; + do_dma_transfer(cmd, ch_mfc_args); + return; } case MFC_PUTL_CMD: - case MFC_PUTRL_CMD: // ??? + case MFC_PUTLB_CMD: + case MFC_PUTLF_CMD: + case MFC_PUTRL_CMD: + case MFC_PUTRLB_CMD: + case MFC_PUTRLF_CMD: case MFC_GETL_CMD: + case MFC_GETLB_CMD: + case MFC_GETLF_CMD: { - if (Ini.HLELogging.GetValue()) LOG_NOTICE(Log::SPU, "DMA %s%s%s%s: lsa=0x%x, list=0x%llx, tag=0x%x, size=0x%x, cmd=0x%x", - (op & MFC_PUT_CMD ? "PUT" : "GET"), - (op & MFC_RESULT_MASK ? "RL" : "L"), - (op & MFC_BARRIER_MASK ? "B" : ""), - (op & MFC_FENCE_MASK ? "F" : ""), - lsa, ea, tag, size, cmd); - - ListCmd(lsa, ea, tag, size, cmd, MFCArgs); - break; + do_dma_list_cmd(cmd, ch_mfc_args); + return; } - case MFC_GETLLAR_CMD: - case MFC_PUTLLC_CMD: - case MFC_PUTLLUC_CMD: - case MFC_PUTQLLUC_CMD: + case MFC_GETLLAR_CMD: // acquire reservation { - if (Ini.HLELogging.GetValue() || size != 128) LOG_NOTICE(Log::SPU, "DMA %s: lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x, cmd=0x%x", - (op == MFC_GETLLAR_CMD ? "GETLLAR" : - op == MFC_PUTLLC_CMD ? "PUTLLC" : - op == MFC_PUTLLUC_CMD ? "PUTLLUC" : "PUTQLLUC"), - lsa, ea, tag, size, cmd); - - if (op == MFC_GETLLAR_CMD) // get reservation + if (ch_mfc_args.size != 128) { - vm::reservation_acquire(vm::get_ptr(ls_offset + lsa), vm::cast(ea), 128, [this]() - { - m_events |= SPU_EVENT_LR; // TODO: atomic op - Notify(); - }); - - MFCArgs.AtomicStat.PushUncond(MFC_GETLLAR_SUCCESS); + break; } - else if (op == MFC_PUTLLC_CMD) // store conditional + + vm::reservation_acquire(vm::get_ptr(offset + ch_mfc_args.lsa), vm::cast(ch_mfc_args.ea), 128, [this]() { - if (vm::reservation_update(vm::cast(ea), vm::get_ptr(ls_offset + lsa), 128)) - { - MFCArgs.AtomicStat.PushUncond(MFC_PUTLLC_SUCCESS); - } - else - { - MFCArgs.AtomicStat.PushUncond(MFC_PUTLLC_FAILURE); - } + ch_event_stat |= SPU_EVENT_LR; + Notify(); + }); + + ch_atomic_stat.push_uncond(MFC_GETLLAR_SUCCESS); + return; + } + + case MFC_PUTLLC_CMD: // store conditionally + { + if (ch_mfc_args.size != 128) + { + break; } - else // store unconditional (may be wrong) + + if (vm::reservation_update(vm::cast(ch_mfc_args.ea), vm::get_ptr(offset + ch_mfc_args.lsa), 128)) { - vm::reservation_op(vm::cast(ea), 128, [this, tag, lsa, ea]() - { - memcpy(vm::priv_ptr(vm::cast(ea)), vm::get_ptr(ls_offset + lsa), 128); - }); - - if (op == MFC_PUTLLUC_CMD) - { - MFCArgs.AtomicStat.PushUncond(MFC_PUTLLUC_SUCCESS); - } - else - { - // tag may be used here - } - } - break; - } - - default: - { - LOG_ERROR(Log::SPU, "Unknown MFC cmd (opcode=0x%x, cmd=0x%x, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x)", op, cmd, lsa, ea, tag, size); - Emu.Pause(); - break; - } - } -} - -bool SPUThread::CheckEvents() -{ - return (m_events & m_event_mask) != 0; -} - -u32 SPUThread::GetChannelCount(u32 ch) -{ - u32 res = 0xdeafbeef; - - switch (ch) - { - case SPU_WrSRR0: res = 1; break; - case SPU_RdSRR0: res = 1; break; - case SPU_WrOutMbox: res = SPU.Out_MBox.GetFreeCount(); break; - case SPU_WrOutIntrMbox: res = SPU.Out_IntrMBox.GetFreeCount(); break; - case SPU_RdInMbox: res = SPU.In_MBox.GetCount(); break; - case MFC_RdTagStat: res = MFC1.TagStatus.GetCount(); break; - case MFC_RdListStallStat: res = StallStat.GetCount(); break; - case MFC_WrTagUpdate: res = MFC1.TagStatus.GetCount(); break;// hack - case SPU_RdSigNotify1: res = SPU.SNR[0].GetCount(); break; - case SPU_RdSigNotify2: res = SPU.SNR[1].GetCount(); break; - case 
MFC_RdAtomicStat: res = MFC1.AtomicStat.GetCount(); break; - case SPU_RdEventStat: res = CheckEvents() ? 1 : 0; break; - - default: - { - LOG_ERROR(Log::SPU, "%s error: unknown/illegal channel (%d [%s]).", - __FUNCTION__, ch, spu_ch_name[ch]); - return 0; - } - } - - //LOG_NOTICE(Log::SPU, "%s(%s) -> 0x%x", __FUNCTION__, spu_ch_name[ch], res); - return res; -} - -void SPUThread::WriteChannel(u32 ch, const u128& r) -{ - const u32 v = r._u32[3]; - - //LOG_NOTICE(Log::SPU, "%s(%s): v=0x%x", __FUNCTION__, spu_ch_name[ch], v); - - switch (ch) - { - case SPU_WrSRR0: - SRR0 = v & 0x3FFFC; //LSLR & ~3 - break; - case SPU_WrOutIntrMbox: - { - if (!group) // if RawSPU - { - if (Ini.HLELogging.GetValue()) LOG_NOTICE(Log::SPU, "SPU_WrOutIntrMbox: interrupt(v=0x%x)", v); - while (!SPU.Out_IntrMBox.Push(v)) - { - std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack - if (Emu.IsStopped()) - { - LOG_WARNING(Log::SPU, "%s(%s) aborted", __FUNCTION__, spu_ch_name[ch]); - return; - } - } - m_intrtag[2].stat |= 1; - if (std::shared_ptr t = Emu.GetCPU().GetThread(m_intrtag[2].thread)) - { - if (t->GetType() == CPU_THREAD_PPU) - { - if (t->IsAlive()) - { - LOG_ERROR(Log::SPU, "%s(%s): interrupt thread was alive", __FUNCTION__, spu_ch_name[ch]); - Emu.Pause(); - return; - } - PPUThread& ppu = *(PPUThread*)t.get(); - ppu.GPR[3] = ppu.m_interrupt_arg; - ppu.FastCall2(vm::read32(ppu.entry), vm::read32(ppu.entry + 4)); - } - } + ch_atomic_stat.push_uncond(MFC_PUTLLC_SUCCESS); } else { - const u8 code = v >> 24; + ch_atomic_stat.push_uncond(MFC_PUTLLC_FAILURE); + } + + return; + } + + case MFC_PUTLLUC_CMD: // store unconditionally + case MFC_PUTQLLUC_CMD: + { + if (ch_mfc_args.size != 128) + { + break; + } + + vm::reservation_op(vm::cast(ch_mfc_args.ea), 128, [this]() + { + memcpy(vm::priv_ptr(vm::cast(ch_mfc_args.ea)), vm::get_ptr(offset + ch_mfc_args.lsa), 128); + }); + + if (cmd == MFC_PUTLLUC_CMD) + { + ch_atomic_stat.push_uncond(MFC_PUTLLUC_SUCCESS); + } + else + { + // tag may be used here + } + + break; + } + } + + LOG_ERROR(SPU, "Unknown DMA %s: cmd=0x%x, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x", get_mfc_cmd_name(cmd), ch_mfc_args.lsa, ch_mfc_args.ea, ch_mfc_args.tag, ch_mfc_args.size, cmd); + throw ""; +} + +u32 SPUThread::get_ch_count(u32 ch) +{ + if (Ini.HLELogging.GetValue()) + { + LOG_NOTICE(SPU, "get_ch_count(ch=%d [%s])", ch, ch < 128 ? spu_ch_name[ch] : "???"); + } + + switch (ch) + { + //case SPU_WrSRR0: return 1; break; + //case SPU_RdSRR0: return 1; break; + case SPU_WrOutMbox: return ch_out_mbox.get_count() ^ 1; break; + case SPU_WrOutIntrMbox: return ch_out_intr_mbox.get_count() ^ 1; break; + case SPU_RdInMbox: return ch_in_mbox.get_count(); break; + case MFC_RdTagStat: return ch_tag_stat.get_count(); break; + case MFC_RdListStallStat: return ch_stall_stat.get_count(); break; + case MFC_WrTagUpdate: return ch_tag_stat.get_count(); break; // hack + case SPU_RdSigNotify1: return ch_snr1.get_count(); break; + case SPU_RdSigNotify2: return ch_snr2.get_count(); break; + case MFC_RdAtomicStat: return ch_atomic_stat.get_count(); break; + case SPU_RdEventStat: return ch_event_stat.read_relaxed() & ch_event_mask ? 1 : 0; break; + } + + LOG_ERROR(SPU, "get_ch_count(ch=%d [%s]): unknown/illegal channel", ch, ch < 128 ? spu_ch_name[ch] : "???"); + throw ""; +} + +u32 SPUThread::get_ch_value(u32 ch) +{ + if (Ini.HLELogging.GetValue()) + { + LOG_NOTICE(SPU, "get_ch_value(ch=%d [%s])", ch, ch < 128 ? 
spu_ch_name[ch] : "???"); + } + + switch (ch) + { + //case SPU_RdSRR0: + // value = SRR0; + // break; + case SPU_RdInMbox: + { + u32 result; + while (!ch_in_mbox.pop(result) && !Emu.IsStopped()) + { + std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack + } + + return result; + } + + case MFC_RdTagStat: + { + u32 result; + while (!ch_tag_stat.pop(result) && !Emu.IsStopped()) + { + std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack + } + + return result; + } + + case MFC_RdTagMask: + { + return ch_tag_mask; + } + + case SPU_RdSigNotify1: + { + u32 result; + while (!ch_snr1.pop(result) && !Emu.IsStopped()) + { + std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack + } + + return result; + } + + case SPU_RdSigNotify2: + { + u32 result; + while (!ch_snr2.pop(result) && !Emu.IsStopped()) + { + std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack + } + + return result; + } + + case MFC_RdAtomicStat: + { + u32 result; + while (!ch_atomic_stat.pop(result) && !Emu.IsStopped()) + { + std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack + } + + return result; + } + + case MFC_RdListStallStat: + { + u32 result; + while (!ch_stall_stat.pop(result) && !Emu.IsStopped()) + { + std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack + } + + return result; + } + + case SPU_RdDec: + { + return ch_dec_value - (u32)(get_time() - ch_dec_start_timestamp); + } + + case SPU_RdEventMask: + { + return ch_event_mask; + } + + case SPU_RdEventStat: + { + u32 result; + while (!(result = ch_event_stat.read_relaxed() & ch_event_mask) && !Emu.IsStopped()) + { + std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack + } + + return result; + } + + case SPU_RdMachStat: + { + return 1; // hack (not isolated, interrupts enabled) + } + } + + LOG_ERROR(SPU, "get_ch_value(ch=%d [%s]): unknown/illegal channel", ch, ch < 128 ? spu_ch_name[ch] : "???"); + throw ""; +} + +void SPUThread::set_ch_value(u32 ch, u32 value) +{ + if (Ini.HLELogging.GetValue()) + { + LOG_NOTICE(SPU, "set_ch_value(ch=%d [%s], value=0x%x)", ch, ch < 128 ? 
spu_ch_name[ch] : "???", value); + } + + switch (ch) + { + //case SPU_WrSRR0: + // SRR0 = value & 0x3FFFC; //LSLR & ~3 + // break; + case SPU_WrOutIntrMbox: + { + if (m_type == CPU_THREAD_RAW_SPU) + { + while (!ch_out_intr_mbox.push(value) && !Emu.IsStopped()) + { + std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack + } + + int2.set(SPU_INT2_STAT_MAILBOX_INT); + throw ""; + + //if (std::shared_ptr t = Emu.GetCPU().GetThread(m_intrtag[2].thread)) + //{ + // if (t->GetType() == CPU_THREAD_PPU) + // { + // if (t->IsAlive()) + // { + // LOG_ERROR(SPU, "%s(%s): interrupt thread was alive", __FUNCTION__, spu_ch_name[ch]); + // Emu.Pause(); + // return; + // } + // PPUThread& ppu = *(PPUThread*)t.get(); + // ppu.GPR[3] = ppu.m_interrupt_arg; + // ppu.FastCall2(vm::read32(ppu.entry), vm::read32(ppu.entry + 4)); + // } + //} + //return; + } + else + { + const u8 code = value >> 24; if (code < 64) { /* ===== sys_spu_thread_send_event (used by spu_printf) ===== */ @@ -522,35 +591,35 @@ void SPUThread::WriteChannel(u32 ch, const u128& r) u8 spup = code & 63; u32 data; - if (!SPU.Out_MBox.Pop(data)) + if (!ch_out_mbox.pop(data)) { - LOG_ERROR(Log::SPU, "sys_spu_thread_send_event(v=0x%x, spup=%d): Out_MBox is empty", v, spup); - return; + LOG_ERROR(SPU, "sys_spu_thread_send_event(value=0x%x, spup=%d): Out_MBox is empty", value, spup); + throw ""; } if (Ini.HLELogging.GetValue()) { - LOG_NOTICE(Log::SPU, "sys_spu_thread_send_event(spup=%d, data0=0x%x, data1=0x%x)", spup, v & 0x00ffffff, data); + LOG_NOTICE(SPU, "sys_spu_thread_send_event(spup=%d, data0=0x%x, data1=0x%x)", spup, value & 0x00ffffff, data); } - std::shared_ptr port = SPUPs[spup]; + std::shared_ptr port;// = SPUPs[spup]; std::lock_guard lock(port->m_mutex); if (!port->eq) { - LOG_WARNING(Log::SPU, "sys_spu_thread_send_event(spup=%d, data0=0x%x, data1=0x%x): event queue not connected", spup, (v & 0x00ffffff), data); - SPU.In_MBox.PushUncond(CELL_ENOTCONN); // TODO: check error passing + LOG_WARNING(SPU, "sys_spu_thread_send_event(spup=%d, data0=0x%x, data1=0x%x): event queue not connected", spup, (value & 0x00ffffff), data); + ch_in_mbox.push_uncond(CELL_ENOTCONN); // TODO: check error passing return; } - if (!port->eq->events.push(SYS_SPU_THREAD_EVENT_USER_KEY, GetId(), ((u64)spup << 32) | (v & 0x00ffffff), data)) + if (!port->eq->events.push(SYS_SPU_THREAD_EVENT_USER_KEY, GetId(), ((u64)spup << 32) | (value & 0x00ffffff), data)) { - SPU.In_MBox.PushUncond(CELL_EBUSY); + ch_in_mbox.push_uncond(CELL_EBUSY); return; } - SPU.In_MBox.PushUncond(CELL_OK); + ch_in_mbox.push_uncond(CELL_OK); return; } else if (code < 128) @@ -560,31 +629,31 @@ void SPUThread::WriteChannel(u32 ch, const u128& r) const u8 spup = code & 63; u32 data; - if (!SPU.Out_MBox.Pop(data)) + if (!ch_out_mbox.pop(data)) { - LOG_ERROR(Log::SPU, "sys_spu_thread_throw_event(v=0x%x, spup=%d): Out_MBox is empty", v, spup); - return; + LOG_ERROR(SPU, "sys_spu_thread_throw_event(value=0x%x, spup=%d): Out_MBox is empty", value, spup); + throw ""; } if (Ini.HLELogging.GetValue()) { - LOG_WARNING(Log::SPU, "sys_spu_thread_throw_event(spup=%d, data0=0x%x, data1=0x%x)", spup, v & 0x00ffffff, data); + LOG_WARNING(SPU, "sys_spu_thread_throw_event(spup=%d, data0=0x%x, data1=0x%x)", spup, value & 0x00ffffff, data); } - std::shared_ptr port = SPUPs[spup]; + std::shared_ptr port;// = SPUPs[spup]; std::lock_guard lock(port->m_mutex); if (!port->eq) { - LOG_WARNING(Log::SPU, "sys_spu_thread_throw_event(spup=%d, data0=0x%x, data1=0x%x): event queue not connected", spup, (v & 
0x00ffffff), data); + LOG_WARNING(SPU, "sys_spu_thread_throw_event(spup=%d, data0=0x%x, data1=0x%x): event queue not connected", spup, (value & 0x00ffffff), data); return; } // TODO: check passing spup value - if (!port->eq->events.push(SYS_SPU_THREAD_EVENT_USER_KEY, GetId(), ((u64)spup << 32) | (v & 0x00ffffff), data)) + if (!port->eq->events.push(SYS_SPU_THREAD_EVENT_USER_KEY, GetId(), ((u64)spup << 32) | (value & 0x00ffffff), data)) { - LOG_WARNING(Log::SPU, "sys_spu_thread_throw_event(spup=%d, data0=0x%x, data1=0x%x) failed (queue is full)", spup, (v & 0x00ffffff), data); + LOG_WARNING(SPU, "sys_spu_thread_throw_event(spup=%d, data0=0x%x, data1=0x%x) failed (queue is full)", spup, (value & 0x00ffffff), data); return; } @@ -593,31 +662,31 @@ void SPUThread::WriteChannel(u32 ch, const u128& r) else if (code == 128) { /* ===== sys_event_flag_set_bit ===== */ - u32 flag = v & 0xffffff; + u32 flag = value & 0xffffff; u32 data; - if (!SPU.Out_MBox.Pop(data)) + if (!ch_out_mbox.pop(data)) { - LOG_ERROR(Log::SPU, "sys_event_flag_set_bit(v=0x%x (flag=%d)): Out_MBox is empty", v, flag); - return; + LOG_ERROR(SPU, "sys_event_flag_set_bit(value=0x%x (flag=%d)): Out_MBox is empty", value, flag); + throw ""; } if (flag > 63) { - LOG_ERROR(Log::SPU, "sys_event_flag_set_bit(id=%d, v=0x%x): flag > 63", data, v, flag); - return; + LOG_ERROR(SPU, "sys_event_flag_set_bit(id=%d, value=0x%x): flag > 63", data, value, flag); + throw ""; } - //if (Ini.HLELogging.GetValue()) + if (Ini.HLELogging.GetValue()) { - LOG_WARNING(Log::SPU, "sys_event_flag_set_bit(id=%d, v=0x%x (flag=%d))", data, v, flag); + LOG_WARNING(SPU, "sys_event_flag_set_bit(id=%d, value=0x%x (flag=%d))", data, value, flag); } std::shared_ptr ef; if (!Emu.GetIdManager().GetIDData(data, ef)) { - LOG_ERROR(Log::SPU, "sys_event_flag_set_bit(id=%d, v=0x%x (flag=%d)): EventFlag not found", data, v, flag); - SPU.In_MBox.PushUncond(CELL_ESRCH); + LOG_ERROR(SPU, "sys_event_flag_set_bit(id=%d, value=0x%x (flag=%d)): EventFlag not found", data, value, flag); + ch_in_mbox.push_uncond(CELL_ESRCH); return; } @@ -628,36 +697,37 @@ void SPUThread::WriteChannel(u32 ch, const u128& r) { ef->signal.push(target); } - SPU.In_MBox.PushUncond(CELL_OK); + + ch_in_mbox.push_uncond(CELL_OK); return; } else if (code == 192) { /* ===== sys_event_flag_set_bit_impatient ===== */ - u32 flag = v & 0xffffff; + u32 flag = value & 0xffffff; u32 data; - if (!SPU.Out_MBox.Pop(data)) + if (!ch_out_mbox.pop(data)) { - LOG_ERROR(Log::SPU, "sys_event_flag_set_bit_impatient(v=0x%x (flag=%d)): Out_MBox is empty", v, flag); - return; + LOG_ERROR(SPU, "sys_event_flag_set_bit_impatient(value=0x%x (flag=%d)): Out_MBox is empty", value, flag); + throw ""; } if (flag > 63) { - LOG_ERROR(Log::SPU, "sys_event_flag_set_bit_impatient(id=%d, v=0x%x): flag > 63", data, v, flag); - return; + LOG_ERROR(SPU, "sys_event_flag_set_bit_impatient(id=%d, value=0x%x): flag > 63", data, value, flag); + throw ""; } - //if (Ini.HLELogging.GetValue()) + if (Ini.HLELogging.GetValue()) { - LOG_WARNING(Log::SPU, "sys_event_flag_set_bit_impatient(id=%d, v=0x%x (flag=%d))", data, v, flag); + LOG_WARNING(SPU, "sys_event_flag_set_bit_impatient(id=%d, value=0x%x (flag=%d))", data, value, flag); } std::shared_ptr ef; if (!Emu.GetIdManager().GetIDData(data, ef)) { - LOG_WARNING(Log::SPU, "sys_event_flag_set_bit_impatient(id=%d, v=0x%x (flag=%d)): EventFlag not found", data, v, flag); + LOG_WARNING(SPU, "sys_event_flag_set_bit_impatient(id=%d, value=0x%x (flag=%d)): EventFlag not found", data, value, flag); return; } @@ 
-668,281 +738,198 @@ void SPUThread::WriteChannel(u32 ch, const u128& r) { ef->signal.push(target); } + return; } else { - u32 data; - if (SPU.Out_MBox.Pop(data)) + if (ch_out_mbox.get_count()) { - LOG_ERROR(Log::SPU, "SPU_WrOutIntrMbox: unknown data (v=0x%x); Out_MBox = 0x%x", v, data); + LOG_ERROR(SPU, "SPU_WrOutIntrMbox: unknown data (value=0x%x); Out_MBox = 0x%x", value, ch_out_mbox.get_value()); } else { - LOG_ERROR(Log::SPU, "SPU_WrOutIntrMbox: unknown data (v=0x%x)", v); + LOG_ERROR(SPU, "SPU_WrOutIntrMbox: unknown data (value=0x%x)", value); } - SPU.In_MBox.PushUncond(CELL_EINVAL); // ??? - return; + + throw ""; } } - break; } case SPU_WrOutMbox: { - while (!SPU.Out_MBox.Push(v) && !Emu.IsStopped()) + while (!ch_out_mbox.push(value) && !Emu.IsStopped()) { std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack } - break; + + return; } case MFC_WrTagMask: { - MFC1.QueryMask.SetValue(v); - break; + ch_tag_mask = value; + return; } case MFC_WrTagUpdate: { - MFC1.TagStatus.PushUncond(MFC1.QueryMask.GetValue()); - break; + ch_tag_stat.push_uncond(ch_tag_mask); // hack + return; } case MFC_LSA: { - MFC1.LSA.SetValue(v); - break; + if (value >= 0x40000) + { + break; + } + + ch_mfc_args.lsa = value; + return; } case MFC_EAH: { - MFC1.EAH.SetValue(v); - break; + ch_mfc_args.eah = value; + return; } case MFC_EAL: { - MFC1.EAL.SetValue(v); - break; + ch_mfc_args.eal = value; + return; } case MFC_Size: { - MFC1.Size_Tag.SetValue((MFC1.Size_Tag.GetValue() & 0xffff) | (v << 16)); - break; + if (value > 16 * 1024) + { + break; + } + + ch_mfc_args.size = (u16)value; + return; } case MFC_TagID: { - MFC1.Size_Tag.SetValue((MFC1.Size_Tag.GetValue() & ~0xffff) | (v & 0xffff)); - break; - } + if (value >= 32) + { + break; + } + ch_mfc_args.tag = (u16)value; + return; + } case MFC_Cmd: { - MFC1.CMDStatus.SetValue(v); - EnqMfcCmd(MFC1); - break; + process_mfc_cmd(value); + ch_mfc_args = {}; // clear non-persistent data + return; } case MFC_WrListStallAck: { - if (v >= 32) + if (value >= 32) { - LOG_ERROR(Log::SPU, "MFC_WrListStallAck error: invalid tag(%d)", v); - return; + break; } - StalledList temp = StallList[v]; - if (!temp.MFCArgs) + + size_t processed = 0; + + for (size_t i = 0; i < mfc_queue.size(); i++) { - LOG_ERROR(Log::SPU, "MFC_WrListStallAck error: empty tag(%d)", v); - return; + if (mfc_queue[i].second.tag == value) + { + do_dma_list_cmd(mfc_queue[i].first, mfc_queue[i].second); + mfc_queue[i].second.tag = ~0; + processed++; + } } - StallList[v].MFCArgs = nullptr; - ListCmd(temp.lsa, temp.ea, temp.tag, temp.size, temp.cmd, *temp.MFCArgs); - break; + + while (processed) + { + for (size_t i = 0; i < mfc_queue.size(); i++) + { + if (mfc_queue[i].second.tag == ~0) + { + mfc_queue.erase(mfc_queue.begin() + i); + processed--; + break; + } + } + } + + return; } case SPU_WrDec: { - m_dec_start = get_time(); - m_dec_value = v; - break; + ch_dec_start_timestamp = get_time(); + ch_dec_value = value; + return; } case SPU_WrEventMask: { - m_event_mask = v; - if (v & ~(SPU_EVENT_IMPLEMENTED)) LOG_ERROR(Log::SPU, "SPU_WrEventMask: unsupported event masked (0x%x)"); - break; + if (value & ~(SPU_EVENT_IMPLEMENTED)) + { + break; + } + + ch_event_mask = value; + return; } case SPU_WrEventAck: { - m_events &= ~v; - break; - } - - default: - { - LOG_ERROR(Log::SPU, "%s error (v=0x%x): unknown/illegal channel (%d [%s]).", __FUNCTION__, v, ch, spu_ch_name[ch]); - break; + ch_event_stat &= ~value; + return; } } - if (Emu.IsStopped()) LOG_WARNING(Log::SPU, "%s(%s) aborted", __FUNCTION__, 
spu_ch_name[ch]); + LOG_ERROR(SPU, "set_ch_value(ch=%d [%s], value=0x%x): unknown/illegal channel", ch, ch < 128 ? spu_ch_name[ch] : "???", value); + throw ""; } -void SPUThread::ReadChannel(u128& r, u32 ch) +void SPUThread::stop_and_signal(u32 code) { - r.clear(); - u32& v = r._u32[3]; + if (Ini.HLELogging.GetValue()) + { + LOG_NOTICE(SPU, "stop_and_signal(code=0x%x)", code); + } - switch (ch) + if (m_type == CPU_THREAD_RAW_SPU) { - case SPU_RdSRR0: - v = SRR0; - break; - case SPU_RdInMbox: - { - while (!SPU.In_MBox.Pop(v) && !Emu.IsStopped()) + status.atomic_op([code](u32& status) { - std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack - } - break; + status = (status & 0xffff) | (code << 16); + status |= SPU_STATUS_STOPPED_BY_STOP; + status &= ~SPU_STATUS_RUNNING; + }); + + Stop(); + + int2.set(SPU_INT2_STAT_SPU_STOP_AND_SIGNAL_INT); + + throw "STOP"; // TODO } - case MFC_RdTagStat: - { - while (!MFC1.TagStatus.Pop(v) && !Emu.IsStopped()) - { - std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack - } - break; - } - - case MFC_RdTagMask: - { - v = MFC1.QueryMask.GetValue(); - break; - } - - case SPU_RdSigNotify1: - { - if (cfg.value & 1) - { - while (!SPU.SNR[0].Pop_XCHG(v) && !Emu.IsStopped()) - { - std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack - } - } - else - { - while (!SPU.SNR[0].Pop(v) && !Emu.IsStopped()) - { - std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack - } - } - break; - } - - case SPU_RdSigNotify2: - { - if (cfg.value & 2) - { - while (!SPU.SNR[1].Pop_XCHG(v) && !Emu.IsStopped()) - { - std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack - } - } - else - { - while (!SPU.SNR[1].Pop(v) && !Emu.IsStopped()) - { - std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack - } - } - break; - } - - case MFC_RdAtomicStat: - { - while (!MFC1.AtomicStat.Pop(v) && !Emu.IsStopped()) - { - std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack - } - break; - } - - case MFC_RdListStallStat: - { - while (!StallStat.Pop(v) && !Emu.IsStopped()) - { - std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack - } - break; - } - - case SPU_RdDec: - { - v = m_dec_value - (u32)(get_time() - m_dec_start); - break; - } - - case SPU_RdEventMask: - { - v = m_event_mask; - break; - } - - case SPU_RdEventStat: - { - while (!CheckEvents() && !Emu.IsStopped()) - { - std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack - } - v = m_events & m_event_mask; - break; - } - - case SPU_RdMachStat: - { - v = 1; // hack (not isolated, interrupts enabled) - // TODO: check value - break; - } - - default: - { - LOG_ERROR(Log::SPU, "%s error: unknown/illegal channel (%d [%s]).", __FUNCTION__, ch, spu_ch_name[ch]); - break; - } - } - - if (Emu.IsStopped()) LOG_WARNING(Log::SPU, "%s(%s) aborted", __FUNCTION__, spu_ch_name[ch]); - - //LOG_NOTICE(Log::SPU, "%s(%s) -> 0x%x", __FUNCTION__, spu_ch_name[ch], v); -} - -void SPUThread::StopAndSignal(u32 code) -{ - SetExitStatus(code); // exit code (not status) - // TODO: process interrupts for RawSPU - switch (code) { case 0x001: { std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack - break; + return; } case 0x002: { FastStop(); - break; + return; } case 0x003: @@ -955,7 +942,7 @@ void SPUThread::StopAndSignal(u32 code) { SetBranch(GPR[0]._u32[3] & 0x3fffc); } - break; + return; } case 0x110: @@ -963,31 +950,30 @@ void SPUThread::StopAndSignal(u32 code) /* ===== sys_spu_thread_receive_event ===== */ u32 spuq = 0; - if 
(!SPU.Out_MBox.Pop(spuq)) + if (!ch_out_mbox.pop(spuq)) { - LOG_ERROR(Log::SPU, "sys_spu_thread_receive_event: cannot read Out_MBox"); - SPU.In_MBox.PushUncond(CELL_EINVAL); // ??? - return; + LOG_ERROR(SPU, "sys_spu_thread_receive_event(): cannot read Out_MBox"); + throw ""; } - if (SPU.In_MBox.GetCount()) + if (ch_in_mbox.get_count()) { - LOG_ERROR(Log::SPU, "sys_spu_thread_receive_event(spuq=0x%x): In_MBox is not empty", spuq); - SPU.In_MBox.PushUncond(CELL_EBUSY); // ??? + LOG_ERROR(SPU, "sys_spu_thread_receive_event(spuq=0x%x): In_MBox is not empty", spuq); + ch_in_mbox.push_uncond(CELL_EBUSY); return; } if (Ini.HLELogging.GetValue()) { - LOG_NOTICE(Log::SPU, "sys_spu_thread_receive_event(spuq=0x%x)", spuq); + LOG_NOTICE(SPU, "sys_spu_thread_receive_event(spuq=0x%x)", spuq); } std::shared_ptr eq; - if (!SPUQs.GetEventQueue(FIX_SPUQ(spuq), eq)) - { - SPU.In_MBox.PushUncond(CELL_EINVAL); // TODO: check error value - return; - } + //if (!SPUQs.GetEventQueue(FIX_SPUQ(spuq), eq)) + //{ + // ch_in_mbox.push_uncond(CELL_EINVAL); // TODO: check error value + // return; + //} u32 tid = GetId(); @@ -1020,10 +1006,10 @@ void SPUThread::StopAndSignal(u32 code) { assert(!"sys_spu_thread_receive_event() failed (II)"); } - SPU.In_MBox.PushUncond(CELL_OK); - SPU.In_MBox.PushUncond((u32)event.data1); - SPU.In_MBox.PushUncond((u32)event.data2); - SPU.In_MBox.PushUncond((u32)event.data3); + ch_in_mbox.push_uncond(CELL_OK); + ch_in_mbox.push_uncond((u32)event.data1); + ch_in_mbox.push_uncond((u32)event.data2); + ch_in_mbox.push_uncond((u32)event.data3); if (!eq->sq.invalidate(tid, eq->protocol) && !eq->sq.pop(tid, eq->protocol)) { assert(!"sys_spu_thread_receive_event() failed (receiving)"); @@ -1038,41 +1024,48 @@ void SPUThread::StopAndSignal(u32 code) { assert(!"sys_spu_thread_receive_event() failed (cancelling)"); } - SPU.In_MBox.PushUncond(CELL_ECANCELED); + ch_in_mbox.push_uncond(CELL_ECANCELED); return; } std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack if (Emu.IsStopped()) { - LOG_WARNING(Log::SPU, "sys_spu_thread_receive_event(spuq=0x%x) aborted", spuq); + LOG_WARNING(SPU, "sys_spu_thread_receive_event(spuq=0x%x) aborted", spuq); return; } } - break; + + return; } case 0x101: { /* ===== sys_spu_thread_group_exit ===== */ - if (!group) + u32 value; + if (!ch_out_mbox.pop(value)) { - LOG_ERROR(Log::SPU, "sys_spu_thread_group_exit(): group not set"); - break; + LOG_ERROR(SPU, "sys_spu_thread_group_exit(): cannot read Out_MBox"); + throw ""; } - else if (!SPU.Out_MBox.GetCount()) + + std::shared_ptr tg; + if (!Emu.GetIdManager().GetIDData(tg_id, tg)) { - LOG_ERROR(Log::SPU, "sys_spu_thread_group_exit(): Out_MBox is empty"); + LOG_ERROR(SPU, "sys_spu_thread_group_exit(status=0x%x): invalid group (%d)", value, tg_id); + throw ""; } - else if (Ini.HLELogging.GetValue()) + + if (Ini.HLELogging.GetValue()) { - LOG_NOTICE(Log::SPU, "sys_spu_thread_group_exit(status=0x%x)", SPU.Out_MBox.GetValue()); + LOG_NOTICE(SPU, "sys_spu_thread_group_exit(status=0x%x)", value); } - group->m_group_exit = true; - group->m_exit_status = SPU.Out_MBox.GetValue(); - for (auto& v : group->list) + tg->m_group_exit = true; + tg->m_exit_status = value; + + for (auto& v : tg->list) { if (std::shared_ptr t = Emu.GetCPU().GetThread(v)) { @@ -1080,39 +1073,66 @@ void SPUThread::StopAndSignal(u32 code) } } - break; + return; } case 0x102: { /* ===== sys_spu_thread_exit ===== */ - if (!SPU.Out_MBox.GetCount()) + if (!ch_out_mbox.get_count()) { - LOG_ERROR(Log::SPU, "sys_spu_thread_exit(): Out_MBox is empty"); + 
LOG_ERROR(SPU, "sys_spu_thread_exit(): Out_MBox is empty"); + throw ""; } - else if (Ini.HLELogging.GetValue()) + + if (Ini.HLELogging.GetValue()) { - // the real exit status - LOG_NOTICE(Log::SPU, "sys_spu_thread_exit(status=0x%x)", SPU.Out_MBox.GetValue()); + LOG_NOTICE(SPU, "sys_spu_thread_exit(status=0x%x)", ch_out_mbox.get_value()); } - SPU.Status.SetValue(SPU_STATUS_STOPPED_BY_STOP); + + status |= SPU_STATUS_STOPPED_BY_STOP; Stop(); break; } - - default: - if (!SPU.Out_MBox.GetCount()) - { - LOG_ERROR(Log::SPU, "Unknown STOP code: 0x%x (no message)", code); - } - else - { - LOG_ERROR(Log::SPU, "Unknown STOP code: 0x%x (message=0x%x)", code, SPU.Out_MBox.GetValue()); - } - Emu.Pause(); - break; } + + if (!ch_out_mbox.get_count()) + { + LOG_ERROR(SPU, "Unknown STOP code: 0x%x", code); + } + else + { + LOG_ERROR(SPU, "Unknown STOP code: 0x%x; Out_MBox=0x%x", code, ch_out_mbox.get_value()); + } + + throw ""; +} + +void SPUThread::halt() +{ + if (Ini.HLELogging.GetValue()) + { + LOG_NOTICE(SPU, "halt(code=0x%x)"); + } + + if (m_type == CPU_THREAD_RAW_SPU) + { + status.atomic_op([](u32& status) + { + status |= SPU_STATUS_STOPPED_BY_HALT; + status &= ~SPU_STATUS_RUNNING; + }); + + Stop(); + + int2.set(SPU_INT2_STAT_SPU_HALT_OR_STEP_INT); + + throw "HALT"; // TODO + } + + status |= SPU_STATUS_STOPPED_BY_HALT; + throw "HALT"; } spu_thread::spu_thread(u32 entry, const std::string& name, u32 stack_size, u32 prio) diff --git a/rpcs3/Emu/Cell/SPUThread.h b/rpcs3/Emu/Cell/SPUThread.h index af3c9c8064..73529e1745 100644 --- a/rpcs3/Emu/Cell/SPUThread.h +++ b/rpcs3/Emu/Cell/SPUThread.h @@ -1,13 +1,15 @@ #pragma once #include "Emu/Cell/Common.h" #include "Emu/CPU/CPUThread.h" +#include "Emu/Cell/SPUContext.h" #include "Emu/Memory/atomic_type.h" #include "Emu/SysCalls/lv2/sleep_queue_type.h" #include "Emu/SysCalls/lv2/sys_event.h" #include "Emu/Event.h" #include "MFC.h" -enum SPUchannels +// SPU Channels +enum : u32 { SPU_RdEventStat = 0, //Read event status with mask applied SPU_WrEventMask = 1, //Write event mask @@ -25,7 +27,8 @@ enum SPUchannels SPU_WrOutIntrMbox = 30, //Write outbound interrupt mailbox contents (interrupting PPU) }; -enum MFCchannels +// MFC Channels +enum : u32 { MFC_WrMSSyncReq = 9, //Write multisource synchronization request MFC_RdTagMask = 12, //Read tag mask @@ -43,7 +46,8 @@ enum MFCchannels MFC_RdAtomicStat = 27, //Read completion status of last completed immediate MFC atomic update command }; -enum SPUEvents +// SPU Events +enum : u32 { SPU_EVENT_MS = 0x1000, // multisource synchronization event SPU_EVENT_A = 0x800, // privileged attention event @@ -61,12 +65,31 @@ enum SPUEvents SPU_EVENT_IMPLEMENTED = SPU_EVENT_LR, }; -enum +// SPU Class 0 Interrupts +enum : u64 { - SPU_RUNCNTL_STOP = 0, - SPU_RUNCNTL_RUNNABLE = 1, + SPU_INT0_STAT_DMA_ALIGNMENT_INT = (1ull << 0), + SPU_INT0_STAT_INVALID_DMA_CMD_INT = (1ull << 1), + SPU_INT0_STAT_SPU_ERROR_INT = (1ull << 2), }; +// SPU Class 2 Interrupts +enum : u64 +{ + SPU_INT2_STAT_MAILBOX_INT = (1ull << 0), + SPU_INT2_STAT_SPU_STOP_AND_SIGNAL_INT = (1ull << 1), + SPU_INT2_STAT_SPU_HALT_OR_STEP_INT = (1ull << 2), + SPU_INT2_STAT_DMA_TAG_GROUP_COMPLETION_INT = (1ull << 3), + SPU_INT2_STAT_SPU_MAILBOX_THESHOLD_INT = (1ull << 4), +}; + +enum +{ + SPU_RUNCNTL_STOP_REQUEST = 0, + SPU_RUNCNTL_RUN_REQUEST = 1, +}; + +// SPU Status Register bits (not accurate) enum { SPU_STATUS_STOPPED = 0x0, @@ -108,6 +131,180 @@ enum SPU_RdSigNotify2_offs = 0x1C00C, }; +union spu_channel_t +{ + struct sync_var_t + { + u32 count; + u32 value; + }; + + 
atomic_t sync_var; // atomic variable + + sync_var_t data; // unsafe direct access + +public: + + bool push(u32 value) + { + bool out_result; + + sync_var.atomic_op([&out_result, value](sync_var_t& data) + { + if ((out_result = data.count == 0)) + { + data.count = 1; + data.value = value; + } + }); + + return out_result; + } + + void push_logical_or(u32 value) + { + sync_var._or({ 1, value }); + } + + void push_uncond(u32 value) + { + sync_var.exchange({ 1, value }); + } + + bool pop(u32& out_value) + { + bool out_result; + + sync_var.atomic_op([&out_result, &out_value](sync_var_t& data) + { + if ((out_result = data.count != 0)) + { + out_value = data.value; + data.count = 0; + data.value = 0; + } + }); + + return out_result; + } + + u32 pop_uncond() + { + u32 out_value; + + sync_var.atomic_op([&out_value](sync_var_t& data) + { + out_value = data.value; + data.count = 0; + // value is not cleared and may be read again + }); + + return out_value; + } + + void set_value(u32 value, u32 count = 1) + { + sync_var.write_relaxed({ count, value }); + } + + u32 get_value() + { + return sync_var.read_relaxed().value; + } + + u32 get_count() + { + return sync_var.read_relaxed().count; + } +}; + +struct spu_channel_4_t +{ + struct sync_var_t + { + u32 count; + u32 value0; + u32 value1; + u32 value2; + }; + + atomic_le_t sync_var; + atomic_le_t value3; + +public: + void clear() + { + sync_var.write_relaxed({}); + value3.write_relaxed({}); + } + + void push_uncond(u32 value) + { + value3.exchange(value); + + sync_var.atomic_op([value](sync_var_t& data) + { + switch (data.count++) + { + case 0: data.value0 = value; break; + case 1: data.value1 = value; break; + case 2: data.value2 = value; break; + default: data.count = 4; + } + }); + } + + bool pop(u32& out_value) + { + bool out_result; + + const u32 last_value = value3.read_sync(); + + sync_var.atomic_op([&out_result, &out_value, last_value](sync_var_t& data) + { + if ((out_result = data.count != 0)) + { + out_value = data.value0; + + data.count--; + data.value0 = data.value1; + data.value1 = data.value2; + data.value2 = last_value; + } + }); + + return out_result; + } + + u32 get_count() + { + return sync_var.read_relaxed().count; + } +}; + +struct spu_interrupt_t +{ + atomic_le_t mask; + atomic_le_t stat; + +public: + void set(u64 ints) + { + stat |= mask.read_relaxed() & ints; + } + + void clear(u64 ints) + { + stat &= ~ints; + } + + void clear() + { + mask.write_relaxed({}); + stat.write_relaxed({}); + } +}; + #define mmToU64Ptr(x) ((u64*)(&x)) #define mmToU32Ptr(x) ((u32*)(&x)) #define mmToU16Ptr(x) ((u16*)(&x)) @@ -254,273 +451,110 @@ public: } }; -union SPU_SNRConfig_hdr -{ - u64 value; - - SPU_SNRConfig_hdr() {} - - std::string ToString() const - { - return fmt::Format("%01x", value); - } - - void Reset() - { - memset(this, 0, sizeof(*this)); - } -}; - -struct SpuGroupInfo; - class SPUThread : public CPUThread { public: u128 GPR[128]; // General-Purpose Registers SPU_FPSCR FPSCR; - u32 SRR0; - SPU_SNRConfig_hdr cfg; // Signal Notification Registers Configuration (OR-mode enabled: 0x1 for SNR1, 0x2 for SNR2) - - std::shared_ptr SPUPs[64]; // SPU Thread Event Ports - EventManager SPUQs; // SPU Queue Mapping - std::shared_ptr group; // associated SPU Thread Group (null for raw spu) - - u64 m_dec_start; // timestamp of writing decrementer value - u32 m_dec_value; // written decrementer value - - u32 m_event_mask; - u32 m_events; std::unordered_map> m_addr_to_hle_function_map; - struct IntrTag + spu_mfc_arg_t ch_mfc_args; + + std::vector> mfc_queue; 
// Only used for stalled list transfers + + u32 ch_tag_mask; + spu_channel_t ch_tag_stat; + spu_channel_t ch_stall_stat; + spu_channel_t ch_atomic_stat; + + spu_channel_4_t ch_in_mbox; + + spu_channel_t ch_out_mbox; + spu_channel_t ch_out_intr_mbox; + + u64 snr_config; // SPU SNR Config Register + + spu_channel_t ch_snr1; // SPU Signal Notification Register 1 + spu_channel_t ch_snr2; // SPU Signal Notification Register 2 + + u32 ch_event_mask; + atomic_le_t ch_event_stat; + + u64 ch_dec_start_timestamp; // timestamp of writing decrementer value + u32 ch_dec_value; // written decrementer value + + atomic_le_t run_ctrl; // SPU Run Control register (only provided to get latest data written) + atomic_le_t status; // SPU Status register + atomic_le_t npc; // SPU Next Program Counter register + + spu_interrupt_t int0; // SPU Class 0 Interrupt Management + spu_interrupt_t int2; // SPU Class 2 Interrupt Management + + u32 tg_id; // SPU Thread Group Id + + void write_snr(bool number, u32 value) { - u32 enabled; // 1 == true - u32 thread; // established interrupt PPU thread - u64 mask; - u64 stat; - - IntrTag() - : enabled(0) - , thread(0) - , mask(0) - , stat(0) + if (!number) { - } - } m_intrtag[3]; - - // limited lock-free queue, most functions are barrier-free - template - class Channel - { - static_assert(max_count >= 1, "Invalid channel count"); - - struct ChannelData - { - u32 value; - u32 is_set; - }; - - atomic_t m_data[max_count]; - size_t m_push; - size_t m_pop; - - public: - __noinline Channel() - { - for (size_t i = 0; i < max_count; i++) + if (snr_config & 1) { - m_data[i].write_relaxed({}); - } - m_push = 0; - m_pop = 0; - } - - __forceinline void PopUncond(u32& res) - { - res = m_data[m_pop].read_relaxed().value; - m_data[m_pop].write_relaxed({}); - m_pop = (m_pop + 1) % max_count; - } - - __forceinline bool Pop(u32& res) - { - const auto data = m_data[m_pop].read_relaxed(); - if (data.is_set) - { - res = data.value; - m_data[m_pop].write_relaxed({}); - m_pop = (m_pop + 1) % max_count; - return true; + ch_snr1.push_logical_or(value); } else { - return false; + ch_snr1.push_uncond(value); } } - - __forceinline bool Pop_XCHG(u32& res) // not barrier-free, not tested + else { - const auto data = m_data[m_pop].exchange({}); - if (data.is_set) + if (snr_config & 2) { - res = data.value; - m_pop = (m_pop + 1) % max_count; - return true; + ch_snr2.push_logical_or(value); } else { - return false; + ch_snr2.push_uncond(value); } } + } - __forceinline void PushUncond_OR(const u32 value) // not barrier-free, not tested - { - m_data[m_push]._or({ value, 1 }); - m_push = (m_push + 1) % max_count; - } + void do_dma_transfer(u32 cmd, spu_mfc_arg_t args); + void do_dma_list_cmd(u32 cmd, spu_mfc_arg_t args); + void process_mfc_cmd(u32 cmd); - __forceinline void PushUncond(const u32 value) - { - m_data[m_push].write_relaxed({ value, 1 }); - m_push = (m_push + 1) % max_count; - } + u32 get_ch_count(u32 ch); + u32 get_ch_value(u32 ch); + void set_ch_value(u32 ch, u32 value); - __forceinline bool Push(const u32 value) - { - if (m_data[m_push].read_relaxed().is_set) - { - return false; - } - else - { - PushUncond(value); - return true; - } - } + void stop_and_signal(u32 code); + void halt(); - __forceinline u32 GetCount() const - { - u32 res = 0; - for (size_t i = 0; i < max_count; i++) - { - res += m_data[i].read_relaxed().is_set ? 
1 : 0; - } - return res; - } + u8 read8(u32 lsa) const { return vm::read8(lsa + offset); } + u16 read16(u32 lsa) const { return vm::read16(lsa + offset); } + u32 read32(u32 lsa) const { return vm::read32(lsa + offset); } + u64 read64(u32 lsa) const { return vm::read64(lsa + offset); } + u128 read128(u32 lsa) const { return vm::read128(lsa + offset); } - __forceinline u32 GetFreeCount() const - { - u32 res = 0; - for (size_t i = 0; i < max_count; i++) - { - res += m_data[i].read_relaxed().is_set ? 0 : 1; - } - return res; - } + void write8(u32 lsa, u8 data) const { vm::write8(lsa + offset, data); } + void write16(u32 lsa, u16 data) const { vm::write16(lsa + offset, data); } + void write32(u32 lsa, u32 data) const { vm::write32(lsa + offset, data); } + void write64(u32 lsa, u64 data) const { vm::write64(lsa + offset, data); } + void write128(u32 lsa, u128 data) const { vm::write128(lsa + offset, data); } - __forceinline void SetValue(const u32 value) - { - m_data[m_push].direct_op([value](ChannelData& v) - { - v.value = value; - }); - } - - __forceinline u32 GetValue() const - { - return m_data[m_pop].read_relaxed().value; - } - }; - - struct MFCReg - { - Channel<1> LSA; - Channel<1> EAH; - Channel<1> EAL; - Channel<1> Size_Tag; - Channel<1> CMDStatus; - Channel<1> QueryType; // only for prxy - Channel<1> QueryMask; - Channel<1> TagStatus; - Channel<1> AtomicStat; - } MFC1, MFC2; - - struct StalledList - { - u32 lsa; - u64 ea; - u16 tag; - u16 size; - u32 cmd; - MFCReg* MFCArgs; - - StalledList() - : MFCArgs(nullptr) - { - } - } StallList[32]; - Channel<1> StallStat; - - struct - { - Channel<1> Out_MBox; - Channel<1> Out_IntrMBox; - Channel<4> In_MBox; - Channel<1> Status; - Channel<1> NPC; - Channel<1> SNR[2]; - } SPU; - - void WriteSNR(bool number, u32 value); - - u32 LSA; - - union - { - u64 EA; - struct { u32 EAH, EAL; }; - }; - - u32 ls_offset; - - void ProcessCmd(u32 cmd, u32 tag, u32 lsa, u64 ea, u32 size); - - void ListCmd(u32 lsa, u64 ea, u16 tag, u16 size, u32 cmd, MFCReg& MFCArgs); - - void EnqMfcCmd(MFCReg& MFCArgs); - - bool CheckEvents(); - - u32 GetChannelCount(u32 ch); - - void WriteChannel(u32 ch, const u128& r); - - void ReadChannel(u128& r, u32 ch); - - void StopAndSignal(u32 code); - - u8 ReadLS8 (const u32 lsa) const { return vm::read8 (lsa + m_offset); } - u16 ReadLS16 (const u32 lsa) const { return vm::read16 (lsa + m_offset); } - u32 ReadLS32 (const u32 lsa) const { return vm::read32 (lsa + m_offset); } - u64 ReadLS64 (const u32 lsa) const { return vm::read64 (lsa + m_offset); } - u128 ReadLS128(const u32 lsa) const { return vm::read128(lsa + m_offset); } - - void WriteLS8 (const u32 lsa, const u8& data) const { vm::write8 (lsa + m_offset, data); } - void WriteLS16 (const u32 lsa, const u16& data) const { vm::write16 (lsa + m_offset, data); } - void WriteLS32 (const u32 lsa, const u32& data) const { vm::write32 (lsa + m_offset, data); } - void WriteLS64 (const u32 lsa, const u64& data) const { vm::write64 (lsa + m_offset, data); } - void WriteLS128(const u32 lsa, const u128& data) const { vm::write128(lsa + m_offset, data); } + void write16(u32 lsa, be_t data) const { vm::write16(lsa + offset, data); } + void write32(u32 lsa, be_t data) const { vm::write32(lsa + offset, data); } + void write64(u32 lsa, be_t data) const { vm::write64(lsa + offset, data); } + void write128(u32 lsa, be_t data) const { vm::write128(lsa + offset, data); } void RegisterHleFunction(u32 addr, std::function function) { m_addr_to_hle_function_map[addr] = function; - WriteLS32(addr, 0x00000003); 
// STOP 3 + write32(addr, 0x00000003); // STOP 3 } void UnregisterHleFunction(u32 addr) { - WriteLS32(addr, 0x00200000); // NOP m_addr_to_hle_function_map.erase(addr); } @@ -530,7 +564,6 @@ public: { if (iter->first >= start_addr && iter->first <= end_addr) { - WriteLS32(iter->first, 0x00200000); // NOP m_addr_to_hle_function_map.erase(iter++); } else diff --git a/rpcs3/Emu/Memory/atomic_type.h b/rpcs3/Emu/Memory/atomic_type.h index 24ad05a7f7..994552e591 100644 --- a/rpcs3/Emu/Memory/atomic_type.h +++ b/rpcs3/Emu/Memory/atomic_type.h @@ -9,7 +9,7 @@ template struct _to_atomic { - static_assert(size == 1 || size == 2 || size == 4 || size == 8, "Invalid atomic type"); + static_assert(size == 1 || size == 2 || size == 4 || size == 8 || size == 16, "Invalid atomic type"); typedef T type; }; @@ -38,6 +38,12 @@ struct _to_atomic typedef uint64_t type; }; +template +struct _to_atomic +{ + typedef u128 type; +}; + template class _atomic_base { diff --git a/rpcs3/Emu/SysCalls/Modules/cellSpursSpu.cpp b/rpcs3/Emu/SysCalls/Modules/cellSpursSpu.cpp index 51a64ef198..42d46fe426 100644 --- a/rpcs3/Emu/SysCalls/Modules/cellSpursSpu.cpp +++ b/rpcs3/Emu/SysCalls/Modules/cellSpursSpu.cpp @@ -79,7 +79,7 @@ void cellSpursModulePutTrace(CellSpursTracePacket * packet, u32 dmaTagId) { /// Check for execution right requests u32 cellSpursModulePollStatus(SPUThread & spu, u32 * status) { - auto ctxt = vm::get_ptr(spu.ls_offset + 0x100); + auto ctxt = vm::get_ptr(spu.offset + 0x100); spu.GPR[3]._u32[3] = 1; if (ctxt->spurs->m.flags1 & SF1_32_WORKLOADS) { @@ -99,24 +99,24 @@ u32 cellSpursModulePollStatus(SPUThread & spu, u32 * status) { /// Exit current workload void cellSpursModuleExit(SPUThread & spu) { - auto ctxt = vm::get_ptr(spu.ls_offset + 0x100); + auto ctxt = vm::get_ptr(spu.offset + 0x100); spu.SetBranch(ctxt->exitToKernelAddr); } /// Execute a DMA operation bool spursDma(SPUThread & spu, u32 cmd, u64 ea, u32 lsa, u32 size, u32 tag) { - spu.WriteChannel(MFC_LSA, u128::from32r(lsa)); - spu.WriteChannel(MFC_EAH, u128::from32r((u32)(ea >> 32))); - spu.WriteChannel(MFC_EAL, u128::from32r((u32)ea)); - spu.WriteChannel(MFC_Size, u128::from32r(size)); - spu.WriteChannel(MFC_TagID, u128::from32r(tag)); - spu.WriteChannel(MFC_Cmd, u128::from32r(cmd)); + spu.set_ch_value(MFC_LSA, lsa); + spu.set_ch_value(MFC_EAH, (u32)(ea >> 32)); + spu.set_ch_value(MFC_EAL, (u32)(ea)); + spu.set_ch_value(MFC_Size, size); + spu.set_ch_value(MFC_TagID, tag); + spu.set_ch_value(MFC_Cmd, cmd); if (cmd == MFC_GETLLAR_CMD || cmd == MFC_PUTLLC_CMD || cmd == MFC_PUTLLUC_CMD) { - u128 rv; + u32 rv; - spu.ReadChannel(rv, MFC_RdAtomicStat); - auto success = rv._u32[3] ? true : false; + rv = spu.get_ch_value(MFC_RdAtomicStat); + auto success = rv ? true : false; success = cmd == MFC_PUTLLC_CMD ? 
!success : success; return success; } @@ -126,28 +126,21 @@ bool spursDma(SPUThread & spu, u32 cmd, u64 ea, u32 lsa, u32 size, u32 tag) { /// Get the status of DMA operations u32 spursDmaGetCompletionStatus(SPUThread & spu, u32 tagMask) { - u128 rv; - - spu.WriteChannel(MFC_WrTagMask, u128::from32r(tagMask)); - spu.WriteChannel(MFC_WrTagUpdate, u128::from32r(MFC_TAG_UPDATE_IMMEDIATE)); - spu.ReadChannel(rv, MFC_RdTagStat); - return rv._u32[3]; + spu.set_ch_value(MFC_WrTagMask, tagMask); + spu.set_ch_value(MFC_WrTagUpdate, MFC_TAG_UPDATE_IMMEDIATE); + return spu.get_ch_value(MFC_RdTagStat); } /// Wait for DMA operations to complete u32 spursDmaWaitForCompletion(SPUThread & spu, u32 tagMask, bool waitForAll) { - u128 rv; - - spu.WriteChannel(MFC_WrTagMask, u128::from32r(tagMask)); - spu.WriteChannel(MFC_WrTagUpdate, u128::from32r(waitForAll ? MFC_TAG_UPDATE_ALL : MFC_TAG_UPDATE_ANY)); - spu.ReadChannel(rv, MFC_RdTagStat); - return rv._u32[3]; + spu.set_ch_value(MFC_WrTagMask, tagMask); + spu.set_ch_value(MFC_WrTagUpdate, waitForAll ? MFC_TAG_UPDATE_ALL : MFC_TAG_UPDATE_ANY); + return spu.get_ch_value(MFC_RdTagStat); } /// Halt the SPU void spursHalt(SPUThread & spu) { - spu.SPU.Status.SetValue(SPU_STATUS_STOPPED_BY_HALT); - spu.Stop(); + spu.halt(); } ////////////////////////////////////////////////////////////////////////////// @@ -156,7 +149,7 @@ void spursHalt(SPUThread & spu) { /// Select a workload to run bool spursKernel1SelectWorkload(SPUThread & spu) { - auto ctxt = vm::get_ptr(spu.ls_offset + 0x100); + auto ctxt = vm::get_ptr(spu.offset + 0x100); // The first and only argument to this function is a boolean that is set to false if the function // is called by the SPURS kernel and set to true if called by cellSpursModulePollStatus. @@ -302,7 +295,7 @@ bool spursKernel1SelectWorkload(SPUThread & spu) { } } - memcpy(vm::get_ptr(spu.ls_offset + 0x100), spurs, 128); + memcpy(vm::get_ptr(spu.offset + 0x100), spurs, 128); }); u64 result = (u64)wklSelectedId << 32; @@ -313,7 +306,7 @@ bool spursKernel1SelectWorkload(SPUThread & spu) { /// Select a workload to run bool spursKernel2SelectWorkload(SPUThread & spu) { - auto ctxt = vm::get_ptr(spu.ls_offset + 0x100); + auto ctxt = vm::get_ptr(spu.offset + 0x100); // The first and only argument to this function is a boolean that is set to false if the function // is called by the SPURS kernel and set to true if called by cellSpursModulePollStatus. @@ -449,7 +442,7 @@ bool spursKernel2SelectWorkload(SPUThread & spu) { } } - memcpy(vm::get_ptr(spu.ls_offset + 0x100), spurs, 128); + memcpy(vm::get_ptr(spu.offset + 0x100), spurs, 128); }); u64 result = (u64)wklSelectedId << 32; @@ -460,7 +453,7 @@ bool spursKernel2SelectWorkload(SPUThread & spu) { /// SPURS kernel dispatch workload void spursKernelDispatchWorkload(SPUThread & spu, u64 widAndPollStatus) { - auto ctxt = vm::get_ptr(spu.ls_offset + 0x100); + auto ctxt = vm::get_ptr(spu.offset + 0x100); auto isKernel2 = ctxt->spurs->m.flags1 & SF1_32_WORKLOADS ? true : false; auto pollStatus = (u32)widAndPollStatus; @@ -471,10 +464,10 @@ void spursKernelDispatchWorkload(SPUThread & spu, u64 widAndPollStatus) { wid < CELL_SPURS_MAX_WORKLOAD2 && isKernel2 ? 
&ctxt->spurs->m.wklInfo2[wid & 0xf] : &ctxt->spurs->m.wklInfoSysSrv; - memcpy(vm::get_ptr(spu.ls_offset + 0x3FFE0), wklInfoOffset, 0x20); + memcpy(vm::get_ptr(spu.offset + 0x3FFE0), wklInfoOffset, 0x20); // Load the workload to LS - auto wklInfo = vm::get_ptr(spu.ls_offset + 0x3FFE0); + auto wklInfo = vm::get_ptr(spu.offset + 0x3FFE0); if (ctxt->wklCurrentAddr != wklInfo->addr) { switch (wklInfo->addr.addr().value()) { case SPURS_IMG_ADDR_SYS_SRV_WORKLOAD: @@ -484,7 +477,7 @@ void spursKernelDispatchWorkload(SPUThread & spu, u64 widAndPollStatus) { spu.RegisterHleFunction(0xA00, spursTasksetEntry); break; default: - memcpy(vm::get_ptr(spu.ls_offset + 0xA00), wklInfo->addr.get_ptr(), wklInfo->size); + memcpy(vm::get_ptr(spu.offset + 0xA00), wklInfo->addr.get_ptr(), wklInfo->size); break; } @@ -508,7 +501,7 @@ void spursKernelDispatchWorkload(SPUThread & spu, u64 widAndPollStatus) { /// SPURS kernel workload exit bool spursKernelWorkloadExit(SPUThread & spu) { - auto ctxt = vm::get_ptr(spu.ls_offset + 0x100); + auto ctxt = vm::get_ptr(spu.offset + 0x100); auto isKernel2 = ctxt->spurs->m.flags1 & SF1_32_WORKLOADS ? true : false; // Select next workload to run @@ -532,7 +525,7 @@ bool spursKernelEntry(SPUThread & spu) { } } - auto ctxt = vm::get_ptr(spu.ls_offset + 0x100); + auto ctxt = vm::get_ptr(spu.offset + 0x100); memset(ctxt, 0, sizeof(SpursKernelContext)); // Save arguments @@ -578,7 +571,7 @@ bool spursKernelEntry(SPUThread & spu) { /// Entry point of the system service bool spursSysServiceEntry(SPUThread & spu) { - auto ctxt = vm::get_ptr(spu.ls_offset + spu.GPR[3]._u32[3]); + auto ctxt = vm::get_ptr(spu.offset + spu.GPR[3]._u32[3]); auto arg = spu.GPR[4]._u64[1]; auto pollStatus = spu.GPR[5]._u32[3]; @@ -598,8 +591,8 @@ void spursSysServiceIdleHandler(SPUThread & spu, SpursKernelContext * ctxt) { bool shouldExit; while (true) { - vm::reservation_acquire(vm::get_ptr(spu.ls_offset + 0x100), vm::cast(ctxt->spurs.addr()), 128, [&spu](){ spu.Notify(); }); - auto spurs = vm::get_ptr(spu.ls_offset + 0x100); + vm::reservation_acquire(vm::get_ptr(spu.offset + 0x100), vm::cast(ctxt->spurs.addr()), 128, [&spu](){ spu.Notify(); }); + auto spurs = vm::get_ptr(spu.offset + 0x100); // Find the number of SPUs that are idling in this SPURS instance u32 nIdlingSpus = 0; @@ -669,7 +662,7 @@ void spursSysServiceIdleHandler(SPUThread & spu, SpursKernelContext * ctxt) { if (Emu.IsStopped()) return; } - if (vm::reservation_update(vm::cast(ctxt->spurs.addr()), vm::get_ptr(spu.ls_offset + 0x100), 128) && (shouldExit || foundReadyWorkload)) { + if (vm::reservation_update(vm::cast(ctxt->spurs.addr()), vm::get_ptr(spu.offset + 0x100), 128) && (shouldExit || foundReadyWorkload)) { break; } } @@ -681,7 +674,7 @@ void spursSysServiceIdleHandler(SPUThread & spu, SpursKernelContext * ctxt) { /// Main function for the system service void spursSysServiceMain(SPUThread & spu, u32 pollStatus) { - auto ctxt = vm::get_ptr(spu.ls_offset + 0x100); + auto ctxt = vm::get_ptr(spu.offset + 0x100); if (ctxt->spurs.addr() % CellSpurs::align) { assert(!"spursSysServiceMain(): invalid spurs alignment"); @@ -693,7 +686,7 @@ void spursSysServiceMain(SPUThread & spu, u32 pollStatus) { if (ctxt->sysSrvInitialised == 0) { ctxt->sysSrvInitialised = 1; - vm::reservation_acquire(vm::get_ptr(spu.ls_offset + 0x100), vm::cast(ctxt->spurs.addr()), 128); + vm::reservation_acquire(vm::get_ptr(spu.offset + 0x100), vm::cast(ctxt->spurs.addr()), 128); vm::reservation_op(vm::cast(ctxt->spurs.addr() + offsetof(CellSpurs, m.wklState1)), 128, [&]() { 
auto spurs = ctxt->spurs.priv_ptr(); @@ -707,7 +700,7 @@ void spursSysServiceMain(SPUThread & spu, u32 pollStatus) { spurs->m.sysSrvOnSpu |= 1 << ctxt->spuNum; - memcpy(vm::get_ptr(spu.ls_offset + 0x2D80), spurs->m.wklState1, 128); + memcpy(vm::get_ptr(spu.offset + 0x2D80), spurs->m.wklState1, 128); }); ctxt->traceBuffer = 0; @@ -805,7 +798,7 @@ void spursSysServiceProcessRequests(SPUThread & spu, SpursKernelContext * ctxt) updateTrace = true; } - memcpy(vm::get_ptr(spu.ls_offset + 0x2D80), spurs->m.wklState1, 128); + memcpy(vm::get_ptr(spu.offset + 0x2D80), spurs->m.wklState1, 128); }); // Process update workload message @@ -826,24 +819,24 @@ void spursSysServiceProcessRequests(SPUThread & spu, SpursKernelContext * ctxt) /// Activate a workload void spursSysServiceActivateWorkload(SPUThread & spu, SpursKernelContext * ctxt) { - auto spurs = vm::get_ptr(spu.ls_offset + 0x100); - memcpy(vm::get_ptr(spu.ls_offset + 0x30000), vm::get_ptr(vm::cast(ctxt->spurs.addr() + offsetof(CellSpurs, m.wklInfo1))), 0x200); + auto spurs = vm::get_ptr(spu.offset + 0x100); + memcpy(vm::get_ptr(spu.offset + 0x30000), vm::get_ptr(vm::cast(ctxt->spurs.addr() + offsetof(CellSpurs, m.wklInfo1))), 0x200); if (spurs->m.flags1 & SF1_32_WORKLOADS) { - memcpy(vm::get_ptr(spu.ls_offset + 0x30200), vm::get_ptr(vm::cast(ctxt->spurs.addr() + offsetof(CellSpurs, m.wklInfo2))), 0x200); + memcpy(vm::get_ptr(spu.offset + 0x30200), vm::get_ptr(vm::cast(ctxt->spurs.addr() + offsetof(CellSpurs, m.wklInfo2))), 0x200); } u32 wklShutdownBitSet = 0; ctxt->wklRunnable1 = 0; ctxt->wklRunnable2 = 0; for (u32 i = 0; i < CELL_SPURS_MAX_WORKLOAD; i++) { - auto wklInfo1 = vm::get_ptr(spu.ls_offset + 0x30000); + auto wklInfo1 = vm::get_ptr(spu.offset + 0x30000); // Copy the priority of the workload for this SPU and its unique id to the LS ctxt->priority[i] = wklInfo1[i].priority[ctxt->spuNum] == 0 ? 
0 : 0x10 - wklInfo1[i].priority[ctxt->spuNum]; ctxt->wklUniqueId[i] = wklInfo1[i].uniqueId.read_relaxed(); if (spurs->m.flags1 & SF1_32_WORKLOADS) { - auto wklInfo2 = vm::get_ptr(spu.ls_offset + 0x30200); + auto wklInfo2 = vm::get_ptr(spu.offset + 0x30200); // Copy the priority of the workload for this SPU to the LS if (wklInfo2[i].priority[ctxt->spuNum]) { @@ -895,7 +888,7 @@ void spursSysServiceActivateWorkload(SPUThread & spu, SpursKernelContext * ctxt) } } - memcpy(vm::get_ptr(spu.ls_offset + 0x2D80), spurs->m.wklState1, 128); + memcpy(vm::get_ptr(spu.offset + 0x2D80), spurs->m.wklState1, 128); }); if (wklShutdownBitSet) { @@ -930,7 +923,7 @@ void spursSysServiceUpdateShutdownCompletionEvents(SPUThread & spu, SpursKernelC } } - memcpy(vm::get_ptr(spu.ls_offset + 0x2D80), spurs->m.wklState1, 128); + memcpy(vm::get_ptr(spu.offset + 0x2D80), spurs->m.wklState1, 128); }); if (wklNotifyBitSet) { @@ -970,19 +963,19 @@ void spursSysServiceTraceUpdate(SPUThread & spu, SpursKernelContext * ctxt, u32 notify = true; } - memcpy(vm::get_ptr(spu.ls_offset + 0x2D80), spurs->m.wklState1, 128); + memcpy(vm::get_ptr(spu.offset + 0x2D80), spurs->m.wklState1, 128); }); // Get trace parameters from CellSpurs and store them in the LS if (((sysSrvMsgUpdateTrace & (1 << ctxt->spuNum)) != 0) || (arg3 != 0)) { - vm::reservation_acquire(vm::get_ptr(spu.ls_offset + 0x80), vm::cast(ctxt->spurs.addr() + offsetof(CellSpurs, m.traceBuffer)), 128); - auto spurs = vm::get_ptr(spu.ls_offset + 0x80 - offsetof(CellSpurs, m.traceBuffer)); + vm::reservation_acquire(vm::get_ptr(spu.offset + 0x80), vm::cast(ctxt->spurs.addr() + offsetof(CellSpurs, m.traceBuffer)), 128); + auto spurs = vm::get_ptr(spu.offset + 0x80 - offsetof(CellSpurs, m.traceBuffer)); if (ctxt->traceMsgCount != 0xFF || spurs->m.traceBuffer.addr() == 0) { spursSysServiceTraceSaveCount(spu, ctxt); } else { - memcpy(vm::get_ptr(spu.ls_offset + 0x2C00), vm::get_ptr(spurs->m.traceBuffer.addr() & -0x4), 0x80); - auto traceBuffer = vm::get_ptr(spu.ls_offset + 0x2C00); + memcpy(vm::get_ptr(spu.offset + 0x2C00), vm::get_ptr(spurs->m.traceBuffer.addr() & -0x4), 0x80); + auto traceBuffer = vm::get_ptr(spu.offset + 0x2C00); ctxt->traceMsgCount = traceBuffer->count[ctxt->spuNum]; } @@ -994,7 +987,7 @@ void spursSysServiceTraceUpdate(SPUThread & spu, SpursKernelContext * ctxt, u32 } if (notify) { - auto spurs = vm::get_ptr(spu.ls_offset + 0x2D80 - offsetof(CellSpurs, m.wklState1)); + auto spurs = vm::get_ptr(spu.offset + 0x2D80 - offsetof(CellSpurs, m.wklState1)); sys_spu_thread_send_event(spu, spurs->m.spuPort, 2, 0); } } @@ -1016,7 +1009,7 @@ void spursSysServiceCleanupAfterSystemWorkload(SPUThread & spu, SpursKernelConte wklId = spurs->m.sysSrvWorkload[ctxt->spuNum]; spurs->m.sysSrvWorkload[ctxt->spuNum] = 0xFF; - memcpy(vm::get_ptr(spu.ls_offset + 0x2D80), spurs->m.wklState1, 128); + memcpy(vm::get_ptr(spu.offset + 0x2D80), spurs->m.wklState1, 128); }); if (do_return) return; @@ -1034,7 +1027,7 @@ void spursSysServiceCleanupAfterSystemWorkload(SPUThread & spu, SpursKernelConte spurs->m.wklIdleSpuCountOrReadyCount2[wklId & 0x0F].write_relaxed(spurs->m.wklIdleSpuCountOrReadyCount2[wklId & 0x0F].read_relaxed() - 1); } - memcpy(vm::get_ptr(spu.ls_offset + 0x100), spurs, 128); + memcpy(vm::get_ptr(spu.offset + 0x100), spurs, 128); }); // Set the current workload id to the id of the pre-empted workload since cellSpursModulePutTrace @@ -1069,8 +1062,8 @@ enum SpursTasksetRequest { /// Taskset PM entry point bool spursTasksetEntry(SPUThread & spu) { - auto ctxt = 
vm::get_ptr(spu.ls_offset + 0x2700); - auto kernelCtxt = vm::get_ptr(spu.ls_offset + spu.GPR[3]._u32[3]); + auto ctxt = vm::get_ptr(spu.offset + 0x2700); + auto kernelCtxt = vm::get_ptr(spu.offset + spu.GPR[3]._u32[3]); auto arg = spu.GPR[4]._u64[1]; auto pollStatus = spu.GPR[5]._u32[3]; @@ -1100,7 +1093,7 @@ bool spursTasksetEntry(SPUThread & spu) { /// Entry point into the Taskset PM for task syscalls bool spursTasksetSyscallEntry(SPUThread & spu) { - auto ctxt = vm::get_ptr(spu.ls_offset + 0x2700); + auto ctxt = vm::get_ptr(spu.offset + 0x2700); // Save task context ctxt->savedContextLr = spu.GPR[0]; @@ -1122,7 +1115,7 @@ bool spursTasksetSyscallEntry(SPUThread & spu) { /// Resume a task void spursTasksetResumeTask(SPUThread & spu) { - auto ctxt = vm::get_ptr(spu.ls_offset + 0x2700); + auto ctxt = vm::get_ptr(spu.offset + 0x2700); // Restore task context spu.GPR[0] = ctxt->savedContextLr; @@ -1136,8 +1129,8 @@ void spursTasksetResumeTask(SPUThread & spu) { /// Start a task void spursTasksetStartTask(SPUThread & spu, CellSpursTaskArgument & taskArgs) { - auto ctxt = vm::get_ptr(spu.ls_offset + 0x2700); - auto taskset = vm::get_ptr(spu.ls_offset + 0x2700); + auto ctxt = vm::get_ptr(spu.offset + 0x2700); + auto taskset = vm::get_ptr(spu.offset + 0x2700); spu.GPR[2].clear(); spu.GPR[3] = u128::from64r(taskArgs._u64[0], taskArgs._u64[1]); @@ -1152,8 +1145,8 @@ void spursTasksetStartTask(SPUThread & spu, CellSpursTaskArgument & taskArgs) { /// Process a request and update the state of the taskset s32 spursTasksetProcessRequest(SPUThread & spu, s32 request, u32 * taskId, u32 * isWaiting) { - auto kernelCtxt = vm::get_ptr(spu.ls_offset + 0x100); - auto ctxt = vm::get_ptr(spu.ls_offset + 0x2700); + auto kernelCtxt = vm::get_ptr(spu.offset + 0x100); + auto ctxt = vm::get_ptr(spu.offset + 0x2700); s32 rc = CELL_OK; s32 numNewlyReadyTasks; @@ -1294,7 +1287,7 @@ s32 spursTasksetProcessRequest(SPUThread & spu, s32 request, u32 * taskId, u32 * taskset->m.signalled = signalled; taskset->m.ready = ready; - memcpy(vm::get_ptr(spu.ls_offset + 0x2700), taskset, 128); + memcpy(vm::get_ptr(spu.offset + 0x2700), taskset, 128); }); // Increment the ready count of the workload by the number of tasks that have become ready @@ -1311,7 +1304,7 @@ s32 spursTasksetProcessRequest(SPUThread & spu, s32 request, u32 * taskId, u32 * spurs->m.wklIdleSpuCountOrReadyCount2[kernelCtxt->wklCurrentId & 0x0F].write_relaxed(readyCount); } - memcpy(vm::get_ptr(spu.ls_offset + 0x100), spurs, 128); + memcpy(vm::get_ptr(spu.offset + 0x100), spurs, 128); }); return rc; @@ -1338,7 +1331,7 @@ bool spursTasksetPollStatus(SPUThread & spu) { /// Exit the Taskset PM void spursTasksetExit(SPUThread & spu) { - auto ctxt = vm::get_ptr(spu.ls_offset + 0x2700); + auto ctxt = vm::get_ptr(spu.offset + 0x2700); // Trace - STOP CellSpursTracePacket pkt; @@ -1358,9 +1351,9 @@ void spursTasksetExit(SPUThread & spu) { /// Invoked when a task exits void spursTasksetOnTaskExit(SPUThread & spu, u64 addr, u32 taskId, s32 exitCode, u64 args) { - auto ctxt = vm::get_ptr(spu.ls_offset + 0x2700); + auto ctxt = vm::get_ptr(spu.offset + 0x2700); - memcpy(vm::get_ptr(spu.ls_offset + 0x10000), vm::get_ptr(addr & -0x80), (addr & 0x7F) << 11); + memcpy(vm::get_ptr(spu.offset + 0x10000), vm::get_ptr(addr & -0x80), (addr & 0x7F) << 11); spu.GPR[3]._u64[1] = ctxt->taskset.addr(); spu.GPR[4]._u32[3] = taskId; @@ -1371,8 +1364,8 @@ void spursTasksetOnTaskExit(SPUThread & spu, u64 addr, u32 taskId, s32 exitCode, /// Save the context of a task s32 
spursTasketSaveTaskContext(SPUThread & spu) { - auto ctxt = vm::get_ptr(spu.ls_offset + 0x2700); - auto taskInfo = vm::get_ptr(spu.ls_offset + 0x2780); + auto ctxt = vm::get_ptr(spu.offset + 0x2700); + auto taskInfo = vm::get_ptr(spu.offset + 0x2780); //spursDmaWaitForCompletion(spu, 0xFFFFFFFF); @@ -1404,20 +1397,18 @@ s32 spursTasketSaveTaskContext(SPUThread & spu) { u128 r; spu.FPSCR.Read(r); ctxt->savedContextFpscr = r; - spu.ReadChannel(r, SPU_RdEventMask); - ctxt->savedSpuWriteEventMask = r._u32[3]; - spu.ReadChannel(r, MFC_RdTagMask); - ctxt->savedWriteTagGroupQueryMask = r._u32[3]; + ctxt->savedSpuWriteEventMask = spu.get_ch_value(SPU_RdEventMask); + ctxt->savedWriteTagGroupQueryMask = spu.get_ch_value(MFC_RdTagMask); // Store the processor context const u32 contextSaveStorage = vm::cast(taskInfo->context_save_storage_and_alloc_ls_blocks & -0x80); - memcpy(vm::get_ptr(contextSaveStorage), vm::get_ptr(spu.ls_offset + 0x2C80), 0x380); + memcpy(vm::get_ptr(contextSaveStorage), vm::get_ptr(spu.offset + 0x2C80), 0x380); // Save LS context for (auto i = 6; i < 128; i++) { if (ls_pattern._bit[i]) { // TODO: Combine DMA requests for consecutive blocks into a single request - memcpy(vm::get_ptr(contextSaveStorage + 0x400 + ((i - 6) << 11)), vm::get_ptr(spu.ls_offset + CELL_SPURS_TASK_TOP + ((i - 6) << 11)), 0x800); + memcpy(vm::get_ptr(contextSaveStorage + 0x400 + ((i - 6) << 11)), vm::get_ptr(spu.offset + CELL_SPURS_TASK_TOP + ((i - 6) << 11)), 0x800); } } @@ -1427,8 +1418,8 @@ s32 spursTasketSaveTaskContext(SPUThread & spu) { /// Taskset dispatcher void spursTasksetDispatch(SPUThread & spu) { - auto ctxt = vm::get_ptr(spu.ls_offset + 0x2700); - auto taskset = vm::get_ptr(spu.ls_offset + 0x2700); + auto ctxt = vm::get_ptr(spu.offset + 0x2700); + auto taskset = vm::get_ptr(spu.offset + 0x2700); u32 taskId; u32 isWaiting; @@ -1441,8 +1432,8 @@ void spursTasksetDispatch(SPUThread & spu) { ctxt->taskId = taskId; // DMA in the task info for the selected task - memcpy(vm::get_ptr(spu.ls_offset + 0x2780), &ctxt->taskset->m.task_info[taskId], sizeof(CellSpursTaskset::TaskInfo)); - auto taskInfo = vm::get_ptr(spu.ls_offset + 0x2780); + memcpy(vm::get_ptr(spu.offset + 0x2780), &ctxt->taskset->m.task_info[taskId], sizeof(CellSpursTaskset::TaskInfo)); + auto taskInfo = vm::get_ptr(spu.offset + 0x2780); auto elfAddr = taskInfo->elf_addr.addr().value(); taskInfo->elf_addr.set(taskInfo->elf_addr.addr() & 0xFFFFFFFFFFFFFFF8ull); @@ -1456,7 +1447,7 @@ void spursTasksetDispatch(SPUThread & spu) { if (isWaiting == 0) { // If we reach here it means that the task is being started and not being resumed - memset(vm::get_ptr(spu.ls_offset + CELL_SPURS_TASK_TOP), 0, CELL_SPURS_TASK_BOTTOM - CELL_SPURS_TASK_TOP); + memset(vm::get_ptr(spu.offset + CELL_SPURS_TASK_TOP), 0, CELL_SPURS_TASK_BOTTOM - CELL_SPURS_TASK_TOP); ctxt->guidAddr = CELL_SPURS_TASK_TOP; u32 entryPoint; @@ -1477,7 +1468,7 @@ void spursTasksetDispatch(SPUThread & spu) { ctxt->x2FD4 = elfAddr & 5; // TODO: Figure this out if ((elfAddr & 5) == 1) { - memcpy(vm::get_ptr(spu.ls_offset + 0x2FC0), &((CellSpursTaskset2*)(ctxt->taskset.get_ptr()))->m.task_exit_code[taskId], 0x10); + memcpy(vm::get_ptr(spu.offset + 0x2FC0), &((CellSpursTaskset2*)(ctxt->taskset.get_ptr()))->m.task_exit_code[taskId], 0x10); } // Trace - GUID @@ -1487,7 +1478,7 @@ void spursTasksetDispatch(SPUThread & spu) { cellSpursModulePutTrace(&pkt, 0x1F); if (elfAddr & 2) { // TODO: Figure this out - spu.SPU.Status.SetValue(SPU_STATUS_STOPPED_BY_STOP); + spu.status |= 
SPU_STATUS_STOPPED_BY_STOP; spu.Stop(); return; } @@ -1495,7 +1486,7 @@ void spursTasksetDispatch(SPUThread & spu) { spursTasksetStartTask(spu, taskInfo->args); } else { if (taskset->m.enable_clear_ls) { - memset(vm::get_ptr(spu.ls_offset + CELL_SPURS_TASK_TOP), 0, CELL_SPURS_TASK_BOTTOM - CELL_SPURS_TASK_TOP); + memset(vm::get_ptr(spu.offset + CELL_SPURS_TASK_TOP), 0, CELL_SPURS_TASK_BOTTOM - CELL_SPURS_TASK_TOP); } // If the entire LS is saved then there is no need to load the ELF as it will be be saved in the context save area as well @@ -1512,11 +1503,11 @@ void spursTasksetDispatch(SPUThread & spu) { // Load saved context from main memory to LS const u32 contextSaveStorage = vm::cast(taskInfo->context_save_storage_and_alloc_ls_blocks & -0x80); - memcpy(vm::get_ptr(spu.ls_offset + 0x2C80), vm::get_ptr(contextSaveStorage), 0x380); + memcpy(vm::get_ptr(spu.offset + 0x2C80), vm::get_ptr(contextSaveStorage), 0x380); for (auto i = 6; i < 128; i++) { if (ls_pattern._bit[i]) { // TODO: Combine DMA requests for consecutive blocks into a single request - memcpy(vm::get_ptr(spu.ls_offset + CELL_SPURS_TASK_TOP + ((i - 6) << 11)), vm::get_ptr(contextSaveStorage + 0x400 + ((i - 6) << 11)), 0x800); + memcpy(vm::get_ptr(spu.offset + CELL_SPURS_TASK_TOP + ((i - 6) << 11)), vm::get_ptr(contextSaveStorage + 0x400 + ((i - 6) << 11)), 0x800); } } @@ -1524,8 +1515,8 @@ void spursTasksetDispatch(SPUThread & spu) { // Restore saved registers spu.FPSCR.Write(ctxt->savedContextFpscr.value()); - spu.WriteChannel(MFC_WrTagMask, u128::from32r(ctxt->savedWriteTagGroupQueryMask)); - spu.WriteChannel(SPU_WrEventMask, u128::from32r(ctxt->savedSpuWriteEventMask)); + spu.set_ch_value(MFC_WrTagMask, ctxt->savedWriteTagGroupQueryMask); + spu.set_ch_value(SPU_WrEventMask, ctxt->savedSpuWriteEventMask); // Trace - GUID memset(&pkt, 0, sizeof(pkt)); @@ -1534,7 +1525,7 @@ void spursTasksetDispatch(SPUThread & spu) { cellSpursModulePutTrace(&pkt, 0x1F); if (elfAddr & 2) { // TODO: Figure this out - spu.SPU.Status.SetValue(SPU_STATUS_STOPPED_BY_STOP); + spu.status |= SPU_STATUS_STOPPED_BY_STOP; spu.Stop(); return; } @@ -1546,8 +1537,8 @@ void spursTasksetDispatch(SPUThread & spu) { /// Process a syscall request s32 spursTasksetProcessSyscall(SPUThread & spu, u32 syscallNum, u32 args) { - auto ctxt = vm::get_ptr(spu.ls_offset + 0x2700); - auto taskset = vm::get_ptr(spu.ls_offset + 0x2700); + auto ctxt = vm::get_ptr(spu.offset + 0x2700); + auto taskset = vm::get_ptr(spu.offset + 0x2700); // If the 0x10 bit is set in syscallNum then its the 2nd version of the // syscall (e.g. 
cellSpursYield2 instead of cellSpursYield) and so don't wait @@ -1625,7 +1616,7 @@ s32 spursTasksetProcessSyscall(SPUThread & spu, u32 syscallNum, u32 args) { cellSpursModulePutTrace(&pkt, ctxt->dmaTagId); // Clear the GUID of the task - memset(vm::get_ptr(spu.ls_offset + ctxt->guidAddr), 0, 0x10); + memset(vm::get_ptr(spu.offset + ctxt->guidAddr), 0, 0x10); if (spursTasksetPollStatus(spu)) { spursTasksetExit(spu); @@ -1639,8 +1630,8 @@ s32 spursTasksetProcessSyscall(SPUThread & spu, u32 syscallNum, u32 args) { /// Initialise the Taskset PM void spursTasksetInit(SPUThread & spu, u32 pollStatus) { - auto ctxt = vm::get_ptr(spu.ls_offset + 0x2700); - auto kernelCtxt = vm::get_ptr(spu.ls_offset + 0x100); + auto ctxt = vm::get_ptr(spu.offset + 0x2700); + auto kernelCtxt = vm::get_ptr(spu.offset + 0x100); kernelCtxt->moduleId[0] = 'T'; kernelCtxt->moduleId[1] = 'K'; @@ -1688,7 +1679,7 @@ s32 spursTasksetLoadElf(SPUThread & spu, u32 * entryPoint, u32 * lowestLoadAddr, } } - loader.load_data(spu.ls_offset, skipWriteableSegments); + loader.load_data(spu.offset, skipWriteableSegments); *entryPoint = loader.m_ehdr.data_be.e_entry; if (*lowestLoadAddr) { *lowestLoadAddr = _lowestLoadAddr; diff --git a/rpcs3/Emu/SysCalls/Modules/sysPrxForUser.cpp b/rpcs3/Emu/SysCalls/Modules/sysPrxForUser.cpp index 77d3ef2fee..82f5968761 100644 --- a/rpcs3/Emu/SysCalls/Modules/sysPrxForUser.cpp +++ b/rpcs3/Emu/SysCalls/Modules/sysPrxForUser.cpp @@ -340,7 +340,7 @@ int sys_raw_spu_image_load(int id, vm::ptr img) // TODO: use segment info memcpy(vm::get_ptr(RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * id), vm::get_ptr(img->addr), 256 * 1024); - vm::write32(RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * id + RAW_SPU_PROB_OFFSET + SPU_NPC_offs, (u32)img->entry_point); + vm::write32(RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * id + RAW_SPU_PROB_OFFSET + SPU_NPC_offs, img->entry_point | be_t::make(1)); return CELL_OK; } diff --git a/rpcs3/Emu/SysCalls/lv2/sys_spu.cpp b/rpcs3/Emu/SysCalls/lv2/sys_spu.cpp index 26df83c60d..8bfbef9682 100644 --- a/rpcs3/Emu/SysCalls/lv2/sys_spu.cpp +++ b/rpcs3/Emu/SysCalls/lv2/sys_spu.cpp @@ -93,7 +93,7 @@ SPUThread* spu_thread_initialize(std::shared_ptr& group, u32 spu_n SPUThread& new_thread = static_cast(Emu.GetCPU().AddThread(CPU_THREAD_SPU)); //initialize from new place: - new_thread.SetOffset(spu_offset); + new_thread.offset = spu_offset; new_thread.SetEntry(spu_ep); new_thread.SetName(name); new_thread.m_custom_task = task; @@ -104,8 +104,12 @@ SPUThread* spu_thread_initialize(std::shared_ptr& group, u32 spu_n new_thread.GPR[6] = u128::from64(0, a4); const u32 id = new_thread.GetId(); - if (group) group->list[spu_num] = id; - new_thread.group = group; + + if (group) + { + group->list[spu_num] = id; + new_thread.tg_id = group->m_id; + } sys_spu.Warning("*** New SPU Thread [%s] (ep=0x%x, opt=0x%x, a1=0x%llx, a2=0x%llx, a3=0x%llx, a4=0x%llx): id=%d, spu_offset=0x%x", name.c_str(), spu_ep, option, a1, a2, a3, a4, id, spu_offset); @@ -173,13 +177,15 @@ s32 sys_spu_thread_get_exit_status(u32 id, vm::ptr status) std::shared_ptr thr = Emu.GetCPU().GetThread(id); - if(!thr || thr->GetType() != CPU_THREAD_SPU) + if (!thr || thr->GetType() != CPU_THREAD_SPU) { return CELL_ESRCH; } + SPUThread& spu = static_cast(*thr); + u32 res; - if (!(*(SPUThread*)thr.get()).SPU.Out_MBox.Pop(res) || !thr->IsStopped()) + if (!spu.IsStopped() || !spu.ch_out_mbox.pop(res)) { return CELL_ESTAT; } @@ -218,7 +224,7 @@ s32 sys_spu_thread_group_destroy(u32 id) std::shared_ptr t = Emu.GetCPU().GetThread(group_info->list[i]); if (t) { - 
Memory.MainMem.Free(((SPUThread*)t.get())->GetOffset()); + Memory.MainMem.Free(((SPUThread*)t.get())->offset); Emu.GetCPU().RemoveThread(group_info->list[i]); } } @@ -255,7 +261,7 @@ s32 sys_spu_thread_group_start(u32 id) std::shared_ptr t = Emu.GetCPU().GetThread(group_info->list[i]); if (t) { - ((SPUThread*)t.get())->SPU.Status.SetValue(SPU_STATUS_RUNNING); + ((SPUThread*)t.get())->status.write_relaxed(SPU_STATUS_RUNNING); t->Exec(); } } @@ -427,7 +433,7 @@ s32 sys_spu_thread_group_terminate(u32 id, int value) { if (std::shared_ptr t = Emu.GetCPU().GetThread(group_info->list[i])) { - ((SPUThread*)t.get())->SPU.Status.SetValue(SPU_STATUS_STOPPED); + ((SPUThread*)t.get())->status.write_relaxed(SPU_STATUS_STOPPED); t->Stop(); } } @@ -492,7 +498,7 @@ s32 sys_spu_thread_group_join(u32 id, vm::ptr cause, vm::ptr status) { if (!t->IsAlive()) { - if (((SPUThread*)t.get())->SPU.Status.GetValue() != SPU_STATUS_STOPPED_BY_STOP) + if (((SPUThread*)t.get())->status.read_sync() != SPU_STATUS_STOPPED_BY_STOP) { all_threads_exit = false; } @@ -566,10 +572,10 @@ s32 sys_spu_thread_write_ls(u32 id, u32 address, u64 value, u32 type) switch (type) { - case 1: (*(SPUThread*)thr.get()).WriteLS8(address, (u8)value); return CELL_OK; - case 2: (*(SPUThread*)thr.get()).WriteLS16(address, (u16)value); return CELL_OK; - case 4: (*(SPUThread*)thr.get()).WriteLS32(address, (u32)value); return CELL_OK; - case 8: (*(SPUThread*)thr.get()).WriteLS64(address, value); return CELL_OK; + case 1: (*(SPUThread*)thr.get()).write8(address, (u8)value); return CELL_OK; + case 2: (*(SPUThread*)thr.get()).write16(address, (u16)value); return CELL_OK; + case 4: (*(SPUThread*)thr.get()).write32(address, (u32)value); return CELL_OK; + case 8: (*(SPUThread*)thr.get()).write64(address, value); return CELL_OK; default: return CELL_EINVAL; } } @@ -598,10 +604,10 @@ s32 sys_spu_thread_read_ls(u32 id, u32 address, vm::ptr value, u32 type) switch (type) { - case 1: *value = (*(SPUThread*)thr.get()).ReadLS8(address); return CELL_OK; - case 2: *value = (*(SPUThread*)thr.get()).ReadLS16(address); return CELL_OK; - case 4: *value = (*(SPUThread*)thr.get()).ReadLS32(address); return CELL_OK; - case 8: *value = (*(SPUThread*)thr.get()).ReadLS64(address); return CELL_OK; + case 1: *value = (*(SPUThread*)thr.get()).read8(address); return CELL_OK; + case 2: *value = (*(SPUThread*)thr.get()).read16(address); return CELL_OK; + case 4: *value = (*(SPUThread*)thr.get()).read32(address); return CELL_OK; + case 8: *value = (*(SPUThread*)thr.get()).read64(address); return CELL_OK; default: return CELL_EINVAL; } } @@ -617,7 +623,7 @@ s32 sys_spu_thread_write_spu_mb(u32 id, u32 value) return CELL_ESRCH; } - (*(SPUThread*)thr.get()).SPU.In_MBox.PushUncond(value); + (*(SPUThread*)thr.get()).ch_in_mbox.push_uncond(value); return CELL_OK; } @@ -638,7 +644,7 @@ s32 sys_spu_thread_set_spu_cfg(u32 id, u64 value) return CELL_EINVAL; } - (*(SPUThread*)thr.get()).cfg.value = value; + (*(SPUThread*)thr.get()).snr_config = value; return CELL_OK; } @@ -654,7 +660,7 @@ s32 sys_spu_thread_get_spu_cfg(u32 id, vm::ptr value) return CELL_ESRCH; } - *value = (*(SPUThread*)thr.get()).cfg.value; + *value = (*(SPUThread*)thr.get()).snr_config; return CELL_OK; } @@ -675,7 +681,7 @@ s32 sys_spu_thread_write_snr(u32 id, u32 number, u32 value) return CELL_EINVAL; } - (*(SPUThread*)thr.get()).WriteSNR(number ? true : false, value); + (*(SPUThread*)thr.get()).write_snr(number ? 
true : false, value); return CELL_OK; } @@ -735,7 +741,7 @@ s32 sys_spu_thread_connect_event(u32 id, u32 eq_id, u32 et, u8 spup) SPUThread& spu = *(SPUThread*)thr.get(); - std::shared_ptr port = spu.SPUPs[spup]; + std::shared_ptr port; //= spu.SPUPs[spup]; std::lock_guard lock(port->m_mutex); @@ -775,7 +781,7 @@ s32 sys_spu_thread_disconnect_event(u32 id, u32 et, u8 spup) SPUThread& spu = *(SPUThread*)thr.get(); - std::shared_ptr port = spu.SPUPs[spup]; + std::shared_ptr port;// = spu.SPUPs[spup]; std::lock_guard lock(port->m_mutex); @@ -812,10 +818,10 @@ s32 sys_spu_thread_bind_queue(u32 id, u32 eq_id, u32 spuq_num) return CELL_ESRCH; } - if (!(*(SPUThread*)thr.get()).SPUQs.RegisterKey(eq, FIX_SPUQ(spuq_num))) - { - return CELL_EBUSY; - } + //if (!(*(SPUThread*)thr.get()).SPUQs.RegisterKey(eq, FIX_SPUQ(spuq_num))) + //{ + // return CELL_EBUSY; + //} return CELL_OK; } @@ -831,10 +837,10 @@ s32 sys_spu_thread_unbind_queue(u32 id, u32 spuq_num) return CELL_ESRCH; } - if (!(*(SPUThread*)thr.get()).SPUQs.UnregisterKey(FIX_SPUQ(spuq_num))) - { - return CELL_ESRCH; // may be CELL_EINVAL - } + //if (!(*(SPUThread*)thr.get()).SPUQs.UnregisterKey(FIX_SPUQ(spuq_num))) + //{ + // return CELL_ESRCH; // may be CELL_EINVAL + //} return CELL_OK; } @@ -885,22 +891,22 @@ s32 sys_spu_thread_group_connect_event_all_threads(u32 id, u32 eq_id, u64 req, v bool found = true; if (req & (1ull << i)) { - for (auto& t : threads) ((SPUThread*)t.get())->SPUPs[i]->m_mutex.lock(); + //for (auto& t : threads) ((SPUThread*)t.get())->SPUPs[i]->m_mutex.lock(); - for (auto& t : threads) if (((SPUThread*)t.get())->SPUPs[i]->eq) found = false; + //for (auto& t : threads) if (((SPUThread*)t.get())->SPUPs[i]->eq) found = false; - if (found) - { - for (auto& t : threads) - { - eq->ports.add(((SPUThread*)t.get())->SPUPs[i]); - ((SPUThread*)t.get())->SPUPs[i]->eq = eq; - } - sys_spu.Warning("*** spup -> %d", i); - *spup = (u8)i; - } + //if (found) + //{ + // for (auto& t : threads) + // { + // eq->ports.add(((SPUThread*)t.get())->SPUPs[i]); + // ((SPUThread*)t.get())->SPUPs[i]->eq = eq; + // } + // sys_spu.Warning("*** spup -> %d", i); + // *spup = (u8)i; + //} - for (auto& t : threads) ((SPUThread*)t.get())->SPUPs[i]->m_mutex.unlock(); + //for (auto& t : threads) ((SPUThread*)t.get())->SPUPs[i]->m_mutex.unlock(); } else { @@ -957,6 +963,11 @@ s32 sys_raw_spu_create_interrupt_tag(u32 id, u32 class_id, u32 hwthread, vm::ptr { sys_spu.Warning("sys_raw_spu_create_interrupt_tag(id=%d, class_id=%d, hwthread=0x%x, intrtag_addr=0x%x)", id, class_id, hwthread, intrtag.addr()); + if (class_id != 0 && class_id != 2) + { + return CELL_EINVAL; + } + RawSPUThread* t = Emu.GetCPU().GetRawSPUThread(id); if (!t) @@ -964,11 +975,6 @@ s32 sys_raw_spu_create_interrupt_tag(u32 id, u32 class_id, u32 hwthread, vm::ptr return CELL_ESRCH; } - if (class_id != 0 && class_id != 2) - { - return CELL_EINVAL; - } - if (t->m_intrtag[class_id].enabled) { return CELL_EAGAIN; @@ -982,21 +988,22 @@ s32 sys_raw_spu_create_interrupt_tag(u32 id, u32 class_id, u32 hwthread, vm::ptr s32 sys_raw_spu_set_int_mask(u32 id, u32 class_id, u64 mask) { - sys_spu.Warning("sys_raw_spu_set_int_mask(id=%d, class_id=%d, mask=0x%llx)", id, class_id, mask); + sys_spu.Log("sys_raw_spu_set_int_mask(id=%d, class_id=%d, mask=0x%llx)", id, class_id, mask); RawSPUThread* t = Emu.GetCPU().GetRawSPUThread(id); + if (!t) { return CELL_ESRCH; } - if (class_id != 0 && class_id != 2) + switch (class_id) { - return CELL_EINVAL; + case 0: t->int0.mask.write_relaxed(mask); return CELL_OK; + case 2: 
t->int2.mask.write_relaxed(mask); return CELL_OK; } - t->m_intrtag[class_id].mask = mask; // TODO: check this - return CELL_OK; + return CELL_EINVAL; } s32 sys_raw_spu_get_int_mask(u32 id, u32 class_id, vm::ptr mask) @@ -1004,18 +1011,19 @@ s32 sys_raw_spu_get_int_mask(u32 id, u32 class_id, vm::ptr mask) sys_spu.Log("sys_raw_spu_get_int_mask(id=%d, class_id=%d, mask_addr=0x%x)", id, class_id, mask.addr()); RawSPUThread* t = Emu.GetCPU().GetRawSPUThread(id); + if (!t) { return CELL_ESRCH; } - if (class_id != 0 && class_id != 2) + switch (class_id) { - return CELL_EINVAL; + case 0: *mask = t->int0.mask.read_relaxed(); return CELL_OK; + case 2: *mask = t->int2.mask.read_relaxed(); return CELL_OK; } - *mask = t->m_intrtag[class_id].mask; - return CELL_OK; + return CELL_EINVAL; } s32 sys_raw_spu_set_int_stat(u32 id, u32 class_id, u64 stat) @@ -1023,18 +1031,19 @@ s32 sys_raw_spu_set_int_stat(u32 id, u32 class_id, u64 stat) sys_spu.Log("sys_raw_spu_set_int_stat(id=%d, class_id=%d, stat=0x%llx)", id, class_id, stat); RawSPUThread* t = Emu.GetCPU().GetRawSPUThread(id); + if (!t) { return CELL_ESRCH; } - if (class_id != 0 && class_id != 2) + switch (class_id) { - return CELL_EINVAL; + case 0: t->int0.clear(stat); return CELL_OK; + case 2: t->int2.clear(stat); return CELL_OK; } - t->m_intrtag[class_id].stat = stat; // TODO: check this - return CELL_OK; + return CELL_EINVAL; } s32 sys_raw_spu_get_int_stat(u32 id, u32 class_id, vm::ptr stat) @@ -1042,18 +1051,19 @@ s32 sys_raw_spu_get_int_stat(u32 id, u32 class_id, vm::ptr stat) sys_spu.Log("sys_raw_spu_get_int_stat(id=%d, class_id=%d, stat_addr=0xx)", id, class_id, stat.addr()); RawSPUThread* t = Emu.GetCPU().GetRawSPUThread(id); + if (!t) { return CELL_ESRCH; } - if (class_id != 0 && class_id != 2) + switch (class_id) { - return CELL_EINVAL; + case 0: *stat = t->int0.stat.read_relaxed(); return CELL_OK; + case 2: *stat = t->int2.stat.read_relaxed(); return CELL_OK; } - *stat = t->m_intrtag[class_id].stat; - return CELL_OK; + return CELL_EINVAL; } s32 sys_raw_spu_read_puint_mb(u32 id, vm::ptr value) @@ -1061,14 +1071,13 @@ s32 sys_raw_spu_read_puint_mb(u32 id, vm::ptr value) sys_spu.Log("sys_raw_spu_read_puint_mb(id=%d, value_addr=0x%x)", id, value.addr()); RawSPUThread* t = Emu.GetCPU().GetRawSPUThread(id); + if (!t) { return CELL_ESRCH; } - u32 v; - t->SPU.Out_IntrMBox.PopUncond(v); - *value = v; + *value = t->ch_out_intr_mbox.pop_uncond(); return CELL_OK; } @@ -1077,12 +1086,18 @@ s32 sys_raw_spu_set_spu_cfg(u32 id, u32 value) sys_spu.Log("sys_raw_spu_set_spu_cfg(id=%d, value=0x%x)", id, value); RawSPUThread* t = Emu.GetCPU().GetRawSPUThread(id); + if (!t) { return CELL_ESRCH; } - t->cfg.value = value; + if (value > 3) + { + sys_spu.Fatal("sys_raw_spu_set_spu_cfg(id=%d, value=0x%x)", id, value); + } + + t->snr_config = value; return CELL_OK; } @@ -1091,47 +1106,46 @@ s32 sys_raw_spu_get_spu_cfg(u32 id, vm::ptr value) sys_spu.Log("sys_raw_spu_get_spu_afg(id=%d, value_addr=0x%x)", id, value.addr()); RawSPUThread* t = Emu.GetCPU().GetRawSPUThread(id); + if (!t) { return CELL_ESRCH; } - *value = (u32)t->cfg.value; + *value = (u32)t->snr_config; return CELL_OK; } void sys_spu_thread_exit(SPUThread & spu, s32 status) { // Cancel any pending status update requests - u128 r; - spu.WriteChannel(MFC_WrTagUpdate, u128::from32r(0)); - while (spu.GetChannelCount(MFC_RdTagStat) != 1); - spu.ReadChannel(r, MFC_RdTagStat); + spu.set_ch_value(MFC_WrTagUpdate, 0); + while (spu.get_ch_count(MFC_RdTagStat) != 1); + spu.get_ch_value(MFC_RdTagStat); // Wait for all 
pending DMA operations to complete - spu.WriteChannel(MFC_WrTagMask, u128::from32r(0xFFFFFFFF)); - spu.WriteChannel(MFC_WrTagUpdate, u128::from32r(MFC_TAG_UPDATE_ALL)); - spu.ReadChannel(r, MFC_RdTagStat); + spu.set_ch_value(MFC_WrTagMask, 0xFFFFFFFF); + spu.set_ch_value(MFC_WrTagUpdate, MFC_TAG_UPDATE_ALL); + spu.get_ch_value(MFC_RdTagStat); - spu.WriteChannel(SPU_WrOutMbox, u128::from32r(status)); - spu.StopAndSignal(0x102); + spu.set_ch_value(SPU_WrOutMbox, status); + spu.stop_and_signal(0x102); } void sys_spu_thread_group_exit(SPUThread & spu, s32 status) { // Cancel any pending status update requests - u128 r; - spu.WriteChannel(MFC_WrTagUpdate, u128::from32r(0)); - while (spu.GetChannelCount(MFC_RdTagStat) != 1); - spu.ReadChannel(r, MFC_RdTagStat); + spu.set_ch_value(MFC_WrTagUpdate, 0); + while (spu.get_ch_count(MFC_RdTagStat) != 1); + spu.get_ch_value(MFC_RdTagStat); // Wait for all pending DMA operations to complete - spu.WriteChannel(MFC_WrTagMask, u128::from32r(0xFFFFFFFF)); - spu.WriteChannel(MFC_WrTagUpdate, u128::from32r(MFC_TAG_UPDATE_ALL)); - spu.ReadChannel(r, MFC_RdTagStat); + spu.set_ch_value(MFC_WrTagMask, 0xFFFFFFFF); + spu.set_ch_value(MFC_WrTagUpdate, MFC_TAG_UPDATE_ALL); + spu.get_ch_value(MFC_RdTagStat); - spu.WriteChannel(SPU_WrOutMbox, u128::from32r(status)); - spu.StopAndSignal(0x101); + spu.set_ch_value(SPU_WrOutMbox, status); + spu.stop_and_signal(0x101); } s32 sys_spu_thread_send_event(SPUThread & spu, u8 spup, u32 data0, u32 data1) @@ -1141,44 +1155,42 @@ s32 sys_spu_thread_send_event(SPUThread & spu, u8 spup, u32 data0, u32 data1) return CELL_EINVAL; } - if (spu.GetChannelCount(SPU_RdInMbox)) + if (spu.get_ch_count(SPU_RdInMbox)) { return CELL_EBUSY; } - spu.WriteChannel(SPU_WrOutMbox, u128::from32r(data1)); - spu.WriteChannel(SPU_WrOutIntrMbox, u128::from32r((spup << 24) | (data0 & 0x00FFFFFF))); + spu.set_ch_value(SPU_WrOutMbox, data1); + spu.set_ch_value(SPU_WrOutIntrMbox, (spup << 24) | (data0 & 0x00FFFFFF)); - u128 r; - spu.ReadChannel(r, SPU_RdInMbox); - return r._u32[3]; + return spu.get_ch_value(SPU_RdInMbox); } s32 sys_spu_thread_switch_system_module(SPUThread & spu, u32 status) { - if (spu.GetChannelCount(SPU_RdInMbox)) + if (spu.get_ch_count(SPU_RdInMbox)) { return CELL_EBUSY; } // Cancel any pending status update requests - u128 r; - spu.WriteChannel(MFC_WrTagUpdate, u128::from32r(0)); - while (spu.GetChannelCount(MFC_RdTagStat) != 1); - spu.ReadChannel(r, MFC_RdTagStat); + spu.set_ch_value(MFC_WrTagUpdate, 0); + while (spu.get_ch_count(MFC_RdTagStat) != 1); + spu.get_ch_value(MFC_RdTagStat); // Wait for all pending DMA operations to complete - spu.WriteChannel(MFC_WrTagMask, u128::from32r(0xFFFFFFFF)); - spu.WriteChannel(MFC_WrTagUpdate, u128::from32r(MFC_TAG_UPDATE_ALL)); - spu.ReadChannel(r, MFC_RdTagStat); + spu.set_ch_value(MFC_WrTagMask, 0xFFFFFFFF); + spu.set_ch_value(MFC_WrTagUpdate, MFC_TAG_UPDATE_ALL); + spu.get_ch_value(MFC_RdTagStat); + + s32 result; do { - spu.WriteChannel(SPU_WrOutMbox, u128::from32r(status)); - spu.StopAndSignal(0x120); - spu.ReadChannel(r, SPU_RdInMbox); + spu.set_ch_value(SPU_WrOutMbox, status); + spu.stop_and_signal(0x120); } - while (r._u32[3] == CELL_EBUSY); + while ((result = spu.get_ch_value(SPU_RdInMbox)) == CELL_EBUSY); - return r._u32[3]; + return result; } diff --git a/rpcs3/emucore.vcxproj b/rpcs3/emucore.vcxproj index 96f3b886d4..2670d6e264 100644 --- a/rpcs3/emucore.vcxproj +++ b/rpcs3/emucore.vcxproj @@ -369,6 +369,7 @@ + diff --git a/rpcs3/emucore.vcxproj.filters b/rpcs3/emucore.vcxproj.filters 
index c6b9461933..416790dbce 100644
--- a/rpcs3/emucore.vcxproj.filters
+++ b/rpcs3/emucore.vcxproj.filters
@@ -1537,5 +1537,8 @@
       <Filter>Emu\SysCalls\Modules</Filter>
+    <ClInclude Include="Emu\Cell\SPUContext.h">
+      <Filter>Emu\CPU\Cell</Filter>
+    </ClInclude>
 </Project>
\ No newline at end of file
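
The patch replaces the old Channel<N> members (In_MBox, Out_MBox, the SNRs and so on) with the lock-free spu_channel_t added to SPUThread.h above: a channel holds at most one 32-bit value, push() fails while it is full, pop() fails while it is empty, and push_uncond() always overwrites. The stand-alone sketch below mirrors those semantics on std::atomic<uint64_t> purely for illustration; it is not part of the patch and omits push_logical_or(), set_value() and the relaxed accessors that the real type provides on rpcs3's atomic_t.

#include <atomic>
#include <cassert>
#include <cstdint>

// One 64-bit word packs the channel state: high half = count (0 or 1),
// low half = value, matching the count/value pair in spu_channel_t.
struct channel_sketch
{
    std::atomic<uint64_t> state{0};

    bool push(uint32_t value) // succeeds only if the channel is empty
    {
        uint64_t expected = 0;
        return state.compare_exchange_strong(expected, (1ull << 32) | value);
    }

    void push_uncond(uint32_t value) // always overwrites, like exchange({1, value})
    {
        state.exchange((1ull << 32) | value);
    }

    bool pop(uint32_t& out) // returns false if the channel was empty
    {
        const uint64_t old = state.exchange(0);
        out = static_cast<uint32_t>(old);
        return (old >> 32) != 0;
    }
};

int main()
{
    channel_sketch ch;
    uint32_t v;
    assert(ch.push(0x1234));          // empty -> accepted
    assert(!ch.push(0x5678));         // full  -> rejected
    assert(ch.pop(v) && v == 0x1234); // drained
    assert(!ch.pop(v));               // empty again
    return 0;
}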
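
Call sites that used to drive the MFC through WriteChannel()/ReadChannel() with u128 wrappers (spursDma, sys_spu_thread_exit, sys_spu_thread_group_exit above) now go through set_ch_value()/get_ch_value() with plain u32 arguments. The sketch below only restates the resulting call sequence, assuming SPUThread and the MFC_* constants from the headers touched by this patch; it is an illustration, not new emulator code.

#include "Emu/Cell/SPUThread.h"

// Program a PUT transfer through the MFC channels, then wait on its tag
// group, following the sequence used by spursDma() and sys_spu_thread_exit().
static void dma_put_and_wait(SPUThread& spu, u64 ea, u32 lsa, u32 size, u32 tag)
{
    spu.set_ch_value(MFC_LSA, lsa);
    spu.set_ch_value(MFC_EAH, (u32)(ea >> 32));
    spu.set_ch_value(MFC_EAL, (u32)(ea));
    spu.set_ch_value(MFC_Size, size);
    spu.set_ch_value(MFC_TagID, tag);          // tag is expected to be 0..31
    spu.set_ch_value(MFC_Cmd, MFC_PUT_CMD);

    spu.set_ch_value(MFC_WrTagMask, 1u << tag);
    spu.set_ch_value(MFC_WrTagUpdate, MFC_TAG_UPDATE_ALL);
    spu.get_ch_value(MFC_RdTagStat);           // read back the tag status, as spursDmaWaitForCompletion() does
}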
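
The old SPU_SNRConfig_hdr cfg field becomes the plain snr_config word written by sys_spu_thread_set_spu_cfg() and sys_raw_spu_set_spu_cfg(), and write_snr() consults it to choose between overwrite and logical-OR delivery. A short sketch of the observable behaviour, under the same header assumptions; the values are only examples.

#include "Emu/Cell/SPUThread.h"

// Bit 0 of snr_config puts SNR1 into logical-OR mode, bit 1 does the same
// for SNR2; otherwise writes overwrite the register (see write_snr() above).
static void snr_modes_example(SPUThread& spu)
{
    spu.snr_config = 1;           // SNR1: OR mode, SNR2: overwrite mode

    spu.write_snr(false, 0x1);    // SNR1 = 0x1
    spu.write_snr(false, 0x2);    // SNR1 = 0x1 | 0x2 = 0x3 (push_logical_or)

    spu.write_snr(true, 0x1);     // SNR2 = 0x1
    spu.write_snr(true, 0x2);     // SNR2 = 0x2 (push_uncond overwrites)
}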
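
Raw SPU interrupt state likewise moves from the m_intrtag array into the int0/int2 members of type spu_interrupt_t, which sys_raw_spu_set_int_mask() and sys_raw_spu_set_int_stat() now manipulate directly. The sketch below, again assuming the headers from this patch, only illustrates the masking rule applied by set(): the SPU_INT2_STAT_SPU_HALT_OR_STEP_INT raised by SPUThread::halt() is latched only while the class 2 mask enables it.

#include "Emu/Cell/SPUThread.h"

// spu_interrupt_t::set() latches an interrupt into stat only if the
// corresponding bit is already enabled in mask; clear(ints) acknowledges it.
static void halt_interrupt_example(SPUThread& spu)
{
    spu.int2.clear();                                        // mask = 0, stat = 0

    spu.int2.set(SPU_INT2_STAT_SPU_HALT_OR_STEP_INT);        // dropped: bit not enabled in mask

    spu.int2.mask.write_relaxed(SPU_INT2_STAT_SPU_HALT_OR_STEP_INT);
    spu.int2.set(SPU_INT2_STAT_SPU_HALT_OR_STEP_INT);        // latched into stat

    spu.int2.clear(SPU_INT2_STAT_SPU_HALT_OR_STEP_INT);      // acknowledged, stat bit cleared
}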