SPU: Implement events channel count, minor interrupt fixes

eladash 2018-08-06 10:19:47 +03:00 committed by kd-11
parent cdc3ee6c1c
commit 36ac68b436
7 changed files with 160 additions and 267 deletions

View file

@ -1082,7 +1082,7 @@ void spu_recompiler::branch_indirect(spu_opcode_t op, bool jt, bool ret)
auto _throw = [](spu_thread* _spu)
{
_spu->state += cpu_flag::dbg_pause;
spu_log.fatal("SPU Interrupts not implemented (mask=0x%x)", +_spu->ch_event_mask);
spu_log.fatal("SPU Interrupts not implemented (mask=0x%x)", +_spu->ch_events.load().mask);
spu_runtime::g_escape(_spu);
};
@ -1090,13 +1090,14 @@ void spu_recompiler::branch_indirect(spu_opcode_t op, bool jt, bool ret)
Label intr = c->newLabel();
Label fail = c->newLabel();
c->mov(SPU_OFF_8(interrupts_enabled), 1);
c->mov(qw1->r32(), SPU_OFF_32(ch_event_mask));
c->mov(*qw1, SPU_OFF_64(ch_events));
c->ror(*qw1, 32);
c->test(qw1->r32(), ~SPU_EVENT_INTR_IMPLEMENTED);
c->ror(*qw1, 32);
c->jnz(fail);
c->and_(qw1->r32(), SPU_OFF_32(ch_event_stat));
c->test(qw1->r32(), SPU_EVENT_INTR_IMPLEMENTED);
c->jnz(intr);
c->mov(SPU_OFF_8(interrupts_enabled), 1);
c->bt(qw1->r32(), 31);
c->jc(intr);
c->jmp(no_intr);
c->bind(fail);
c->mov(SPU_OFF_32(pc), *addr);
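
Editor's note: the rotate/test/rotate sequence above works because the event mask now lives in the upper 32 bits of the packed 64-bit ch_events word (see the ch_events_t union added later in this diff), while the channel count sits at bit 31 of the lower half. A plain-C++ sketch of what the emitted code computes; check_intr and the constant's value are illustrative, not part of the commit:

#include <cstdint>

// Placeholder value standing in for SPU_EVENT_SN; the real constant is
// defined in SPUThread.h.
constexpr uint32_t SPU_EVENT_INTR_IMPLEMENTED = 0x2;

enum class branch_path { fail, intr, no_intr };

branch_path check_intr(uint64_t ch_events, bool& interrupts_enabled)
{
    // ror 32 + test r32: expose the mask field (bits 32..63) for the test
    const uint32_t mask = static_cast<uint32_t>(ch_events >> 32);
    if (mask & ~SPU_EVENT_INTR_IMPLEMENTED)
        return branch_path::fail; // jnz fail: unimplemented interrupt source masked

    interrupts_enabled = true;
    // bt r32, 31 after the second ror: probe the channel count bit
    if (static_cast<uint32_t>(ch_events) & (1u << 31))
        return branch_path::intr; // jc intr

    return branch_path::no_intr;
}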
@ -1245,125 +1246,6 @@ void spu_recompiler::fall(spu_opcode_t op)
c->call(asmjit::imm_ptr<void(*)(spu_thread*, u32, spu_inter_func_t)>(gate));
}
void spu_recompiler::get_events()
{
using namespace asmjit;
Label label1 = c->newLabel();
Label rcheck = c->newLabel();
Label tcheck = c->newLabel();
Label label2 = c->newLabel();
// Check if reservation exists
c->mov(*addr, SPU_OFF_32(raddr));
c->test(*addr, *addr);
c->jnz(rcheck);
// Reservation check (unlikely)
after.emplace_back([=, this]()
{
Label fail = c->newLabel();
c->bind(rcheck);
c->mov(qw1->r32(), *addr);
c->mov(*qw0, imm_ptr(+vm::g_reservations));
c->and_(qw1->r32(), 0xff80);
c->shr(qw1->r32(), 1);
c->mov(*qw0, x86::qword_ptr(*qw0, *qw1));
c->cmp(*qw0, SPU_OFF_64(rtime));
c->jne(fail);
c->mov(*qw0, imm_ptr(vm::g_base_addr));
if (utils::has_avx())
{
c->vmovups(x86::ymm0, x86::yword_ptr(*cpu, offset32(&spu_thread::rdata) + 0));
c->vxorps(x86::ymm1, x86::ymm0, x86::yword_ptr(*qw0, *addr, 0, 0));
c->vmovups(x86::ymm0, x86::yword_ptr(*cpu, offset32(&spu_thread::rdata) + 32));
c->vxorps(x86::ymm2, x86::ymm0, x86::yword_ptr(*qw0, *addr, 0, 32));
c->vmovups(x86::ymm0, x86::yword_ptr(*cpu, offset32(&spu_thread::rdata) + 64));
c->vxorps(x86::ymm3, x86::ymm0, x86::yword_ptr(*qw0, *addr, 0, 64));
c->vmovups(x86::ymm0, x86::yword_ptr(*cpu, offset32(&spu_thread::rdata) + 96));
c->vxorps(x86::ymm4, x86::ymm0, x86::yword_ptr(*qw0, *addr, 0, 96));
c->vorps(x86::ymm0, x86::ymm1, x86::ymm2);
c->vorps(x86::ymm1, x86::ymm3, x86::ymm4);
c->vorps(x86::ymm0, x86::ymm1, x86::ymm0);
c->vptest(x86::ymm0, x86::ymm0);
c->vzeroupper();
c->jz(label1);
}
else
{
c->movaps(x86::xmm0, x86::dqword_ptr(*qw0, *addr));
c->xorps(x86::xmm0, x86::dqword_ptr(*cpu, offset32(&spu_thread::rdata) + 0));
for (u32 i = 16; i < 128; i += 16)
{
c->movaps(x86::xmm1, x86::dqword_ptr(*qw0, *addr, 0, i));
c->xorps(x86::xmm1, x86::dqword_ptr(*cpu, offset32(&spu_thread::rdata) + i));
c->orps(x86::xmm0, x86::xmm1);
}
if (utils::has_sse41())
{
c->ptest(x86::xmm0, x86::xmm0);
c->jz(label1);
}
else
{
c->packssdw(x86::xmm0, x86::xmm0);
c->movq(x86::rax, x86::xmm0);
c->test(x86::rax, x86::rax);
c->jz(label1);
}
}
c->bind(fail);
c->lock().bts(SPU_OFF_32(ch_event_stat), 10);
c->mov(SPU_OFF_32(raddr), 0);
c->jmp(label1);
});
c->bind(label1);
c->jmp(tcheck);
// Check decrementer event (unlikely)
after.emplace_back([=, this]()
{
auto sub = [](spu_thread* _spu)
{
_spu->get_events(SPU_EVENT_TM);
};
c->bind(tcheck);
c->mov(*arg0, *cpu);
c->call(imm_ptr<void(*)(spu_thread*)>(sub));
c->jmp(label2);
});
Label fail = c->newLabel();
after.emplace_back([=, this]()
{
auto _throw = [](spu_thread* _spu)
{
_spu->state += cpu_flag::dbg_pause;
spu_log.fatal("SPU Events not implemented (mask=0x%x).", +_spu->ch_event_mask);
spu_runtime::g_escape(_spu);
};
c->bind(fail);
c->mov(*arg0, *cpu);
c->add(x86::rsp, 0x28);
c->jmp(imm_ptr<void(*)(spu_thread*)>(_throw));
});
// Load active events into addr
c->bind(label2);
c->mov(*addr, SPU_OFF_32(ch_event_stat));
c->mov(qw1->r32(), SPU_OFF_32(ch_event_mask));
c->test(qw1->r32(), ~SPU_EVENT_IMPLEMENTED);
c->jnz(fail);
c->and_(*addr, qw1->r32());
}
void spu_recompiler::UNK(spu_opcode_t op)
{
auto gate = [](spu_thread* _spu, u32 op)
@ -1607,7 +1489,8 @@ void spu_recompiler::RDCH(spu_opcode_t op)
case SPU_RdEventMask:
{
const XmmLink& vr = XmmAlloc();
c->movd(vr, SPU_OFF_32(ch_event_mask));
c->movq(vr, SPU_OFF_64(ch_events));
c->psrldq(vr, 4);
c->pslldq(vr, 12);
c->movdqa(SPU_OFF_128(gpr, op.rt), vr);
return;
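
Editor's note: the byte-shift pair above replaces the old 32-bit movd. Sketched with SSE intrinsics (the shift counts are the point; the emitter's register allocation differs, and read_event_mask is a hypothetical name):

#include <emmintrin.h>
#include <cstdint>

// movq loads the 64-bit channel word; psrldq 4 discards the low dword
// (events/waiting/count), leaving the mask in bytes 0..3; pslldq 12 then
// parks it in bytes 12..15, the preferred slot (_u32[3]) of the SPU GPR.
__m128i read_event_mask(uint64_t ch_events)
{
    __m128i v = _mm_cvtsi64_si128(static_cast<long long>(ch_events));
    v = _mm_srli_si128(v, 4);
    v = _mm_slli_si128(v, 12);
    return v;
}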
@ -1615,28 +1498,7 @@ void spu_recompiler::RDCH(spu_opcode_t op)
case SPU_RdEventStat:
{
spu_log.warning("[0x%x] RDCH: RdEventStat", m_pos);
get_events();
Label wait = c->newLabel();
Label ret = c->newLabel();
c->jz(wait);
after.emplace_back([=, this, pos = m_pos]
{
c->bind(wait);
c->lea(addr->r64(), get_pc(pos));
c->and_(*addr, 0x3fffc);
c->mov(SPU_OFF_32(pc), *addr);
c->mov(arg1->r32(), op.ra);
c->mov(*arg0, *cpu);
c->call(imm_ptr(spu_rdch));
c->jmp(ret);
});
c->bind(ret);
c->movd(x86::xmm0, *addr);
c->pslldq(x86::xmm0, 12);
c->movdqa(SPU_OFF_128(gpr, op.rt), x86::xmm0);
return;
break; // TODO
}
case SPU_RdMachStat:
{
@ -1729,10 +1591,7 @@ void spu_recompiler::RCHCNT(spu_opcode_t op)
case SPU_RdEventStat:
{
spu_log.warning("[0x%x] RCHCNT: RdEventStat", m_pos);
get_events();
c->setnz(addr->r8());
c->movzx(*addr, addr->r8());
break;
[[fallthrough]]; // fallback
}
default:
{
@ -2603,16 +2462,13 @@ void spu_recompiler::WRCH(spu_opcode_t op)
}
case SPU_WrEventMask:
{
c->mov(qw0->r32(), SPU_OFF_32(gpr, op.rt, &v128::_u32, 3));
c->mov(SPU_OFF_32(ch_event_mask), qw0->r32());
return;
// TODO
break;
}
case SPU_WrEventAck:
{
c->mov(qw0->r32(), SPU_OFF_32(gpr, op.rt, &v128::_u32, 3));
c->not_(qw0->r32());
c->lock().and_(SPU_OFF_32(ch_event_stat), qw0->r32());
return;
// TODO
break;
}
case 69:
{
@ -2761,7 +2617,10 @@ void spu_recompiler::IRET(spu_opcode_t op)
void spu_recompiler::BISLED(spu_opcode_t op)
{
get_events();
auto get_events = [](spu_thread* _spu) -> u32
{
return _spu->get_events(_spu->ch_events.load().mask).count;
};
c->mov(*addr, SPU_OFF_32(gpr, op.ra, &v128::_u32, 3));
@ -2773,6 +2632,9 @@ void spu_recompiler::BISLED(spu_opcode_t op)
c->movdqa(SPU_OFF_128(gpr, op.rt), vr);
asmjit::Label branch_label = c->newLabel();
c->mov(*arg0, *cpu);
c->call(asmjit::imm_ptr<u32(*)(spu_thread*)>(get_events));
c->test(*addr, 1);
c->jne(branch_label);
after.emplace_back([=, this]()

View file

@ -95,8 +95,6 @@ private:
void branch_set_link(u32 target);
void fall(spu_opcode_t op);
void get_events();
public:
void UNK(spu_opcode_t op);

View file

@ -518,7 +518,7 @@ bool spu_interpreter::BISLED(spu_thread& spu, spu_opcode_t op)
const u32 target = spu_branch_target(spu.gpr[op.ra]._u32[3]);
spu.gpr[op.rt] = v128::from32r(spu_branch_target(spu.pc + 4));
if (spu.get_events())
if (spu.get_events().count)
{
spu.pc = target;
set_interrupt_status(spu, op);

View file

@ -5387,11 +5387,6 @@ public:
static u32 exec_read_events(spu_thread* _spu)
{
if (const u32 events = _spu->get_events(_spu->ch_event_mask))
{
return events;
}
// TODO
return exec_rdch(_spu, SPU_RdEventStat);
}
@ -5490,7 +5485,7 @@ public:
}
case SPU_RdEventMask:
{
res.value = m_ir->CreateLoad(spu_ptr<u32>(&spu_thread::ch_event_mask));
res.value = m_ir->CreateTrunc(m_ir->CreateLShr(m_ir->CreateLoad(spu_ptr<u64>(&spu_thread::ch_events), true), 32), get_type<u32>());
break;
}
case SPU_RdEventStat:
@ -5524,7 +5519,7 @@ public:
static u32 exec_get_events(spu_thread* _spu, u32 mask)
{
return _spu->get_events(mask);
return _spu->get_events(mask).count;
}
llvm::Value* get_rchcnt(u32 off, u64 inv = 0)
@ -5602,9 +5597,8 @@ public:
}
case SPU_RdEventStat:
{
res.value = call("spu_get_events", &exec_get_events, m_thread, m_ir->CreateLoad(spu_ptr<u32>(&spu_thread::ch_event_mask)));
res.value = m_ir->CreateICmpNE(res.value, m_ir->getInt32(0));
res.value = m_ir->CreateZExt(res.value, get_type<u32>());
const auto mask = m_ir->CreateTrunc(m_ir->CreateLShr(m_ir->CreateLoad(spu_ptr<u64>(&spu_thread::ch_events), true), 32), get_type<u32>());
res.value = call("spu_get_events", &exec_get_events, m_thread, mask);
break;
}
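
Editor's note: the CreateLShr/CreateTrunc pattern used in both cases above is a plain field extraction from the packed channel word. Scalar equivalent, assuming the ch_events_t layout from SPUThread.h in this diff (extract_event_mask is an illustrative name):

#include <cstdint>

// The event mask occupies bits 32..63 of ch_events.all.
uint32_t extract_event_mask(uint64_t ch_events_all)
{
    return static_cast<uint32_t>(ch_events_all >> 32);
}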
@ -6097,18 +6091,6 @@ public:
m_ir->CreateStore(val.value, spu_ptr<u32>(&spu_thread::ch_dec_value));
return;
}
case SPU_WrEventMask:
{
m_ir->CreateStore(val.value, spu_ptr<u32>(&spu_thread::ch_event_mask))->setVolatile(true);
return;
}
case SPU_WrEventAck:
{
// "Collect" events before final acknowledgment
call("spu_get_events", &exec_get_events, m_thread, val.value);
m_ir->CreateAtomicRMW(llvm::AtomicRMWInst::And, spu_ptr<u32>(&spu_thread::ch_event_stat), eval(~val).value, llvm::AtomicOrdering::Release);
return;
}
case 69:
{
return;
@ -8280,7 +8262,7 @@ public:
{
_spu->set_interrupt_status(true);
if ((_spu->ch_event_mask & _spu->ch_event_stat & SPU_EVENT_INTR_IMPLEMENTED) > 0)
if (_spu->ch_events.load().count)
{
_spu->interrupts_enabled = false;
_spu->srr0 = addr;
@ -8586,7 +8568,8 @@ public:
if (m_block) m_block->block_end = m_ir->GetInsertBlock();
const auto addr = eval(extract(get_vr(op.ra), 3) & 0x3fffc);
set_link(op);
const auto res = call("spu_get_events", &exec_get_events, m_thread, m_ir->CreateLoad(spu_ptr<u32>(&spu_thread::ch_event_mask)));
const auto mask = m_ir->CreateTrunc(m_ir->CreateLShr(m_ir->CreateLoad(spu_ptr<u64>(&spu_thread::ch_events), true), 32), get_type<u32>());
const auto res = call("spu_get_events", &exec_get_events, m_thread, mask);
const auto target = add_block_indirect(op, addr);
m_ir->CreateCondBr(m_ir->CreateICmpNE(res, m_ir->getInt32(0)), target, add_block_next());
}

View file

@ -858,8 +858,11 @@ std::string spu_thread::dump_regs() const
fmt::append(ret, "r%d: %s\n", i, gpr[i]);
}
fmt::append(ret, "\nEvent Stat: 0x%x\n", +ch_event_stat);
fmt::append(ret, "Event Mask: 0x%x\n", +ch_event_mask);
const auto events = ch_events.load();
fmt::append(ret, "\nEvent Stat: 0x%x\n", events.events);
fmt::append(ret, "Event Mask: 0x%x\n", events.mask);
fmt::append(ret, "Event Count: %u\n", events.count);
fmt::append(ret, "SRR0: 0x%05x\n", srr0);
fmt::append(ret, "Stall Stat: %s\n", ch_stall_stat);
fmt::append(ret, "Stall Mask: 0x%x\n", ch_stall_mask);
@ -872,7 +875,7 @@ std::string spu_thread::dump_regs() const
fmt::append(ret, "Reservation Addr: none\n");
fmt::append(ret, "Atomic Stat: %s\n", ch_atomic_stat); // TODO: use mfc_atomic_status formatting
fmt::append(ret, "Interrupts Enabled: %s\n", interrupts_enabled.load());
fmt::append(ret, "Interrupts: %s\n", interrupts_enabled ? "Enabled" : "Disabled");
fmt::append(ret, "Inbound Mailbox: %s\n", ch_in_mbox);
fmt::append(ret, "Out Mailbox: %s\n", ch_out_mbox);
fmt::append(ret, "Out Interrupts Mailbox: %s\n", ch_out_intr_mbox);
@ -975,9 +978,8 @@ void spu_thread::cpu_init()
ch_out_mbox.data.raw() = {};
ch_out_intr_mbox.data.raw() = {};
ch_event_mask.raw() = 0;
ch_event_stat.raw() = 0;
interrupts_enabled.raw() = false;
ch_events.raw() = {};
interrupts_enabled = false;
raddr = 0;
ch_dec_start_timestamp = get_timebased_time();
@ -1805,7 +1807,7 @@ bool spu_thread::do_list_transfer(spu_mfc_cmd& args)
if (!ch_stall_stat.get_count())
{
ch_event_stat |= SPU_EVENT_SN;
set_events(SPU_EVENT_SN);
}
ch_stall_stat.set_value(utils::rol32(1, args.tag) | ch_stall_stat.get_value());
@ -1926,7 +1928,7 @@ bool spu_thread::do_putllc(const spu_mfc_cmd& args)
// Last check for event before we clear the reservation
if (raddr == addr || rtime != (vm::reservation_acquire(raddr, 128) & (-128 | vm::dma_lockb)) || !cmp_rdata(rdata, vm::_ref<decltype(rdata)>(raddr)))
{
ch_event_stat |= SPU_EVENT_LR;
set_events(SPU_EVENT_LR);
}
}
@ -2137,9 +2139,8 @@ void spu_thread::do_mfc(bool wait)
bool spu_thread::check_mfc_interrupts(u32 next_pc)
{
if (interrupts_enabled && (ch_event_mask & ch_event_stat & SPU_EVENT_INTR_IMPLEMENTED) > 0)
if (ch_events.load().count && std::exchange(interrupts_enabled, false))
{
interrupts_enabled.release(false);
srr0 = next_pc;
// Test for BR/BRA instructions (they are equivalent at zero pc)
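
Editor's note: the rewritten condition fires only when the channel count is raised and interrupts were enabled, and std::exchange clears the enable flag in the same expression. A hypothetical skeleton with simplified types (the real function goes on to decode the instruction at address 0, as the comment above notes):

#include <cstdint>
#include <utility>

struct spu_intr_state { bool interrupts_enabled; uint32_t srr0, pc; };

bool check_mfc_interrupts_sketch(spu_intr_state& spu, bool count, uint32_t next_pc)
{
    if (count && std::exchange(spu.interrupts_enabled, false))
    {
        spu.srr0 = next_pc; // save the return address
        spu.pc = 0;         // simplified; real code special-cases BR/BRA at pc 0
        return true;
    }
    return false;
}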
@ -2251,7 +2252,7 @@ bool spu_thread::process_mfc_cmd()
// Last check for event before we replace the reservation with a new one
if ((vm::reservation_acquire(raddr, 128) & (-128 | vm::dma_lockb)) != rtime || !cmp_rdata(rdata, vm::_ref<decltype(rdata)>(raddr)))
{
ch_event_stat |= SPU_EVENT_LR;
set_events(SPU_EVENT_LR);
}
}
else if (raddr == addr)
@ -2259,7 +2260,7 @@ bool spu_thread::process_mfc_cmd()
// Lost previous reservation on polling
if (ntime != rtime || !cmp_rdata(rdata, dst))
{
ch_event_stat |= SPU_EVENT_LR;
set_events(SPU_EVENT_LR);
}
}
@ -2413,19 +2414,19 @@ bool spu_thread::process_mfc_cmd()
ch_mfc_cmd.cmd, ch_mfc_cmd.lsa, ch_mfc_cmd.eal, ch_mfc_cmd.tag, ch_mfc_cmd.size);
}
u32 spu_thread::get_events(u32 mask_hint, bool waiting)
spu_thread::ch_events_t spu_thread::get_events(u32 mask_hint, bool waiting, bool reading)
{
const u32 mask1 = ch_event_mask;
if (mask1 & ~SPU_EVENT_IMPLEMENTED)
if (auto mask1 = ch_events.load().mask; mask1 & ~SPU_EVENT_IMPLEMENTED)
{
fmt::throw_exception("SPU Events not implemented (mask=0x%x)" HERE, mask1);
}
u32 collect = 0;
// Check reservation status and set SPU_EVENT_LR if lost
if (mask_hint & SPU_EVENT_LR && raddr && ((vm::reservation_acquire(raddr, sizeof(rdata)) & -128) != rtime || !cmp_rdata(rdata, vm::_ref<decltype(rdata)>(raddr))))
{
ch_event_stat |= SPU_EVENT_LR;
collect |= SPU_EVENT_LR;
raddr = 0;
}
@ -2436,42 +2437,47 @@ u32 spu_thread::get_events(u32 mask_hint, bool waiting)
{
// Set next event to the next time the decrementer underflows
ch_dec_start_timestamp -= res << 32;
if ((ch_event_stat & SPU_EVENT_TM) == 0)
{
ch_event_stat |= SPU_EVENT_TM;
}
collect |= SPU_EVENT_TM;
}
}
// Simple polling or polling with atomically set/removed SPU_EVENT_WAITING flag
return !waiting ? ch_event_stat & mask1 : ch_event_stat.atomic_op([&](u32& stat) -> u32
if (collect)
{
if (u32 res = stat & mask1)
{
stat &= ~SPU_EVENT_WAITING;
return res;
}
set_events(collect);
}
stat |= SPU_EVENT_WAITING;
return 0;
});
return ch_events.fetch_op([&](ch_events_t& events)
{
if (!reading)
return false;
if (waiting)
events.waiting = !events.count;
events.count = false;
return true;
}).first;
}
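
Editor's note: a minimal sketch of the fetch_op contract as I understand RPCS3's atomic_t (not the real implementation): the lambda mutates a copy inside a CAS loop and returns whether to commit, and .first of the result is the pre-op snapshot. That is why a reading call above returns the count as it was before being consumed, while a non-reading call returns the snapshot untouched:

#include <atomic>
#include <utility>

template <typename T, typename F>
std::pair<T, bool> fetch_op_sketch(std::atomic<T>& var, F func)
{
    T old = var.load();
    for (;;)
    {
        T next = old;
        if (!func(next))
            return {old, false}; // lambda declined: nothing is stored
        if (var.compare_exchange_weak(old, next))
            return {old, true};  // committed: caller gets the pre-op value
    }
}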
void spu_thread::set_events(u32 mask)
void spu_thread::set_events(u32 bits)
{
if (mask & ~SPU_EVENT_IMPLEMENTED)
{
fmt::throw_exception("SPU Events not implemented (mask=0x%x)" HERE, mask);
}
ASSUME(!(bits & ~0xffff));
// Set new events, get old event mask
const u32 old_stat = ch_event_stat.fetch_or(mask);
// Notify if some events were set
if (~old_stat & mask && old_stat & SPU_EVENT_WAITING && ch_event_stat & SPU_EVENT_WAITING)
if (ch_events.atomic_op([&](ch_events_t& events)
{
notify();
events.events |= bits;
// If one masked event was fired, set the channel count (even if the event bit was already 1)
if (events.mask & bits)
{
events.count = true;
return !!events.waiting;
}
return false;
}))
{
// Preserved for external events implementation
//notify();
}
}
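
Editor's note: a std::atomic rendition of set_events() above, using the same bit layout (events 0..15, waiting 30, count 31, mask 32..63); set_events_sketch is illustrative only. It returns true exactly when a parked reader would need the (currently disabled) wakeup:

#include <atomic>
#include <cstdint>

bool set_events_sketch(std::atomic<uint64_t>& ch, uint32_t bits)
{
    uint64_t old = ch.load();
    for (;;)
    {
        uint64_t next = old | bits; // events |= bits
        const bool hit = (static_cast<uint32_t>(old >> 32) & bits) != 0; // mask & bits
        if (hit)
            next |= uint64_t{1} << 31; // raise the channel count
        if (ch.compare_exchange_weak(old, next))
            return hit && ((old >> 30) & 1) != 0; // was a reader waiting?
    }
}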
@ -2480,17 +2486,13 @@ void spu_thread::set_interrupt_status(bool enable)
if (enable)
{
// Detect enabling interrupts with events masked
if (ch_event_mask & ~SPU_EVENT_INTR_IMPLEMENTED)
if (auto mask = ch_events.load().mask; mask & ~SPU_EVENT_INTR_IMPLEMENTED)
{
fmt::throw_exception("SPU Interrupts not implemented (mask=0x%x)" HERE, +ch_event_mask);
fmt::throw_exception("SPU Interrupts not implemented (mask=0x%x)" HERE, mask);
}
}
interrupts_enabled = true;
}
else
{
interrupts_enabled = false;
}
interrupts_enabled = enable;
}
u32 spu_thread::get_ch_count(u32 ch)
@ -2508,7 +2510,7 @@ u32 spu_thread::get_ch_count(u32 ch)
case SPU_RdSigNotify1: return ch_snr1.get_count();
case SPU_RdSigNotify2: return ch_snr2.get_count();
case MFC_RdAtomicStat: return ch_atomic_stat.get_count();
case SPU_RdEventStat: return get_events() != 0;
case SPU_RdEventStat: return get_events().count;
case MFC_Cmd: return 16 - mfc_size;
}
@ -2645,18 +2647,17 @@ s64 spu_thread::get_ch_value(u32 ch)
case SPU_RdEventMask:
{
return ch_event_mask;
return ch_events.load().mask;
}
case SPU_RdEventStat:
{
const u32 mask1 = ch_event_mask;
const u32 mask1 = ch_events.load().mask;
auto events = get_events(mask1, false, true);
u32 res = get_events(mask1);
if (res)
if (events.count)
{
return res;
return events.events & mask1;
}
spu_function_logger logger(*this, "MFC Events read");
@ -2669,7 +2670,7 @@ s64 spu_thread::get_ch_value(u32 ch)
fmt::throw_exception("Not supported: event mask 0x%x" HERE, mask1);
}
while (res = get_events(mask1), !res)
for (; !events.count; events = get_events(mask1, false, true))
{
state += cpu_flag::wait;
@ -2682,10 +2683,10 @@ s64 spu_thread::get_ch_value(u32 ch)
}
check_state();
return res;
return events.events & mask1;
}
while (res = get_events(mask1, true), !res)
for (; !events.count; events = get_events(mask1, true, true))
{
state += cpu_flag::wait;
@ -2698,7 +2699,7 @@ s64 spu_thread::get_ch_value(u32 ch)
}
check_state();
return res;
return events.events & mask1;
}
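
Editor's note: condensed shape of the two polling loops above (a sketch; the cpu_flag::wait and check_state handling is omitted). reading=true consumes the channel count, and waiting=true leaves the waiting bit set when nothing was pending, so set_events() knows a reader is parked; get_events_fn stands in for get_events(mask1, waiting, true):

#include <cstdint>

struct events_snapshot { uint32_t events; bool count; };

template <typename F>
uint32_t read_event_stat(uint32_t mask1, F get_events_fn)
{
    events_snapshot ev = get_events_fn(mask1);
    while (!ev.count)
    {
        // the real loop parks the thread here until an event arrives
        ev = get_events_fn(mask1);
    }
    return ev.events & mask1; // RDCH result: pending events visible through the mask
}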
case SPU_RdMachStat:
@ -3006,7 +3007,28 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
case SPU_WrEventMask:
{
ch_event_mask = value;
get_events(value);
if (ch_events.atomic_op([&](ch_events_t& events)
{
events.mask = value;
if (events.events & events.mask)
{
events.count = true;
return true;
}
return false;
}))
{
// Check interrupts in case count is 1
if (check_mfc_interrupts(pc + 4))
{
spu_runtime::g_escape(this);
}
}
return true;
}
@ -3014,7 +3036,27 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
{
// "Collect" events before final acknowledgment
get_events(value);
ch_event_stat &= ~value;
if (ch_events.atomic_op([&](ch_events_t& events)
{
events.events &= ~value;
if (events.events & events.mask)
{
events.count = true;
return true;
}
return false;
}))
{
// Check interrupts in case count is 1
if (check_mfc_interrupts(pc + 4))
{
spu_runtime::g_escape(this);
}
}
return true;
}
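
Editor's note: a toy single-threaded model of the two handlers above, same bit layout (the real code runs these bodies inside ch_events.atomic_op and then re-checks MFC interrupts whenever the count was raised). events_word is illustrative; note that, mirroring the diff, ack never clears an already-raised count:

#include <cstdint>

struct events_word
{
    uint64_t all = 0;

    // SPU_WrEventMask: install a new mask; raise count if a pending event
    // becomes visible through it.
    bool write_mask(uint32_t value)
    {
        all = (all & 0xffffffffu) | (uint64_t{value} << 32);
        return raise_count_if_match();
    }

    // SPU_WrEventAck: clear acknowledged event bits; re-raise count only
    // if masked events remain pending.
    bool ack(uint32_t value)
    {
        all &= ~uint64_t{value & 0xffffu};
        return raise_count_if_match();
    }

    bool raise_count_if_match()
    {
        const uint32_t events = static_cast<uint32_t>(all) & 0xffffu;
        const uint32_t mask = static_cast<uint32_t>(all >> 32);
        if (events & mask)
        {
            all |= uint64_t{1} << 31; // count = 1
            return true;
        }
        return false;
    }
};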

View file

@ -75,11 +75,7 @@ enum : u32
SPU_EVENT_IMPLEMENTED = SPU_EVENT_LR | SPU_EVENT_TM | SPU_EVENT_SN, // Mask of implemented events
SPU_EVENT_INTR_IMPLEMENTED = SPU_EVENT_SN,
SPU_EVENT_WAITING = 0x80000000, // Originally unused, set when SPU thread starts waiting on ch_event_stat
//SPU_EVENT_AVAILABLE = 0x40000000, // Originally unused, channel count of the SPU_RdEventStat channel
//SPU_EVENT_INTR_ENABLED = 0x20000000, // Originally unused, represents "SPU Interrupts Enabled" status
SPU_EVENT_INTR_TEST = SPU_EVENT_INTR_IMPLEMENTED
SPU_EVENT_INTR_TEST = SPU_EVENT_INTR_IMPLEMENTED,
};
// SPU Class 0 Interrupts
@ -691,9 +687,17 @@ public:
spu_channel ch_snr1{}; // SPU Signal Notification Register 1
spu_channel ch_snr2{}; // SPU Signal Notification Register 2
atomic_t<u32> ch_event_mask;
atomic_t<u32> ch_event_stat;
atomic_t<bool> interrupts_enabled;
union ch_events_t
{
u64 all;
bf_t<u64, 0, 16> events;
bf_t<u64, 30, 1> waiting;
bf_t<u64, 31, 1> count;
bf_t<u64, 32, 32> mask;
};
atomic_t<ch_events_t> ch_events;
bool interrupts_enabled;
u64 ch_dec_start_timestamp; // timestamp of writing decrementer value
u32 ch_dec_value; // written decrementer value
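
Editor's note: a plain-struct reading of the bf_t layout above (a sketch, not part of the commit). Packing all four fields into one 64-bit word is the core of this change: pending events, the SPU_RdEventStat channel count, the reader-waiting flag and the event mask all move together, so every transition is a single atomic operation:

#include <cstdint>

struct ch_events_sketch
{
    uint64_t all = 0;

    uint32_t events() const  { return static_cast<uint32_t>(all & 0xffff); } // bits 0..15
    bool     waiting() const { return (all >> 30) & 1; }                     // bit 30
    bool     count() const   { return (all >> 31) & 1; }                     // bit 31
    uint32_t mask() const    { return static_cast<uint32_t>(all >> 32); }    // bits 32..63
};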
@ -751,8 +755,8 @@ public:
u32 get_mfc_completed();
bool process_mfc_cmd();
u32 get_events(u32 mask_hint = -1, bool waiting = false);
void set_events(u32 mask);
ch_events_t get_events(u32 mask_hint = -1, bool waiting = false, bool reading = false);
void set_events(u32 bits);
void set_interrupt_status(bool enable);
bool check_mfc_interrupts(u32 next_pc);
u32 get_ch_count(u32 ch);

View file

@ -40,6 +40,7 @@ enum registers : int
PPU_VRSAVE,
MFC_PEVENTS,
MFC_EVENTS_MASK,
MFC_EVENTS_COUNT,
MFC_TAG_UPD,
MFC_TAG_MASK,
MFC_ATOMIC_STAT,
@ -113,6 +114,7 @@ register_editor_dialog::register_editor_dialog(QWidget *parent, u32 _pc, const s
for (int i = spu_r0; i <= spu_r127; i++) m_register_combo->addItem(qstr(fmt::format("r%d", i % 128)), i);
m_register_combo->addItem("MFC Pending Events", +MFC_PEVENTS);
m_register_combo->addItem("MFC Events Mask", +MFC_EVENTS_MASK);
m_register_combo->addItem("MFC Events Count", +MFC_EVENTS_COUNT);
m_register_combo->addItem("MFC Tag Mask", +MFC_TAG_MASK);
//m_register_combo->addItem("MFC Tag Update", +MFC_TAG_UPD);
//m_register_combo->addItem("MFC Atomic Status", +MFC_ATOMIC_STAT);
@ -187,8 +189,9 @@ void register_editor_dialog::updateRegister(int reg)
const u32 reg_index = reg % 128;
str = fmt::format("%016llx%016llx", spu.gpr[reg_index]._u64[1], spu.gpr[reg_index]._u64[0]);
}
else if (reg == MFC_PEVENTS) str = fmt::format("%08x", +spu.ch_event_stat);
else if (reg == MFC_EVENTS_MASK) str = fmt::format("%08x", +spu.ch_event_mask);
else if (reg == MFC_PEVENTS) str = fmt::format("%08x", +spu.ch_events.load().events);
else if (reg == MFC_EVENTS_MASK) str = fmt::format("%08x", +spu.ch_events.load().mask);
else if (reg == MFC_EVENTS_COUNT) str = fmt::format("%u", +spu.ch_events.load().count);
else if (reg == MFC_TAG_MASK) str = fmt::format("%08x", spu.ch_tag_mask);
else if (reg == SPU_SRR0) str = fmt::format("%08x", spu.srr0);
else if (reg == SPU_SNR1) str = fmt::format("%s", spu.ch_snr1);
@ -318,11 +321,12 @@ void register_editor_dialog::OnOkay(const std::shared_ptr<cpu_thread>& _cpu)
if (u32 reg_value; check_res(std::from_chars(value.c_str() + 24, value.c_str() + 32, reg_value, 16), value.c_str() + 32))
{
bool ok = true;
if (reg == MFC_PEVENTS && !(reg_value & ~SPU_EVENT_IMPLEMENTED)) spu.ch_event_stat = reg_value;
else if (reg == MFC_EVENTS_MASK && !(reg_value & ~SPU_EVENT_IMPLEMENTED)) spu.ch_event_mask = reg_value;
if (reg == MFC_PEVENTS && !(reg_value & ~SPU_EVENT_IMPLEMENTED)) spu.ch_events.atomic_op([&](typename spu_thread::ch_events_t& events){ events.events = reg_value; });
else if (reg == MFC_EVENTS_MASK && !(reg_value & ~SPU_EVENT_IMPLEMENTED)) spu.ch_events.atomic_op([&](typename spu_thread::ch_events_t& events){ events.mask = reg_value; });
else if (reg == MFC_EVENTS_COUNT && reg_value <= 1u) spu.ch_events.atomic_op([&](typename spu_thread::ch_events_t& events){ events.count = reg_value; });
else if (reg == MFC_TAG_MASK) spu.ch_tag_mask = reg_value;
else if (reg == SPU_SRR0) spu.srr0 = reg_value & 0x3fffc;
else if (reg == PC) spu.pc = reg_value & 0x3fffc;
else if (reg == SPU_SRR0 && !(reg_value & ~0x3fffc)) spu.srr0 = reg_value;
else if (reg == PC && !(reg_value & ~0x3fffc)) spu.pc = reg_value;
else ok = false;
if (ok) return;