SPU channel optimization (#1675)

This commit is contained in:
Ivan 2016-04-19 16:04:02 +03:00
parent 81780d1932
commit efc8779275
7 changed files with 216 additions and 255 deletions

View file

@ -132,7 +132,7 @@ bool spursDma(SPUThread& spu, u32 cmd, u64 ea, u32 lsa, u32 size, u32 tag)
{
u32 rv;
rv = spu.get_ch_value(MFC_RdAtomicStat);
spu.get_ch_value(MFC_RdAtomicStat, rv);
auto success = rv ? true : false;
success = cmd == MFC_PUTLLC_CMD ? !success : success;
return success;
@ -146,7 +146,7 @@ u32 spursDmaGetCompletionStatus(SPUThread& spu, u32 tagMask)
{
spu.set_ch_value(MFC_WrTagMask, tagMask);
spu.set_ch_value(MFC_WrTagUpdate, MFC_TAG_UPDATE_IMMEDIATE);
return spu.get_ch_value(MFC_RdTagStat);
u32 rv; spu.get_ch_value(MFC_RdTagStat, rv); return rv;
}
// Wait for DMA operations to complete
@ -154,7 +154,7 @@ u32 spursDmaWaitForCompletion(SPUThread& spu, u32 tagMask, bool waitForAll)
{
spu.set_ch_value(MFC_WrTagMask, tagMask);
spu.set_ch_value(MFC_WrTagUpdate, waitForAll ? MFC_TAG_UPDATE_ALL : MFC_TAG_UPDATE_ANY);
return spu.get_ch_value(MFC_RdTagStat);
u32 rv; spu.get_ch_value(MFC_RdTagStat, rv); return rv;
}
// Halt the SPU
@ -163,6 +163,89 @@ void spursHalt(SPUThread& spu)
spu.halt();
}
// HLE for the SPU-side sys_spu_thread_exit: flush DMA, report status, stop with code 0x102.
void sys_spu_thread_exit(SPUThread& spu, s32 status)
{
	// Scratch slot for channel reads whose value is discarded
	u32 discarded;

	// Cancel any pending tag status update request, then drain the stale status
	spu.set_ch_value(MFC_WrTagUpdate, 0);
	while (spu.get_ch_count(MFC_RdTagStat) != 1);
	spu.get_ch_value(MFC_RdTagStat, discarded);

	// Block until every DMA tag group has completed
	spu.set_ch_value(MFC_WrTagMask, 0xFFFFFFFF);
	spu.set_ch_value(MFC_WrTagUpdate, MFC_TAG_UPDATE_ALL);
	spu.get_ch_value(MFC_RdTagStat, discarded);

	// Publish the exit status and raise the thread-exit stop code
	spu.set_ch_value(SPU_WrOutMbox, status);
	spu.stop_and_signal(0x102);
}
// HLE for the SPU-side sys_spu_thread_group_exit: flush DMA, report status, stop with code 0x101.
void sys_spu_thread_group_exit(SPUThread& spu, s32 status)
{
	// Scratch slot for channel reads whose value is discarded
	u32 discarded;

	// Cancel any pending tag status update request, then drain the stale status
	spu.set_ch_value(MFC_WrTagUpdate, 0);
	while (spu.get_ch_count(MFC_RdTagStat) != 1);
	spu.get_ch_value(MFC_RdTagStat, discarded);

	// Block until every DMA tag group has completed
	spu.set_ch_value(MFC_WrTagMask, 0xFFFFFFFF);
	spu.set_ch_value(MFC_WrTagUpdate, MFC_TAG_UPDATE_ALL);
	spu.get_ch_value(MFC_RdTagStat, discarded);

	// Publish the exit status and raise the group-exit stop code
	spu.set_ch_value(SPU_WrOutMbox, status);
	spu.stop_and_signal(0x101);
}
// HLE for the SPU-side sys_spu_thread_send_event: post an event on port spup
// and return the result code delivered through the inbound mailbox.
s32 sys_spu_thread_send_event(SPUThread& spu, u8 spup, u32 data0, u32 data1)
{
	// Only ports 0..63 are valid
	if (spup > 0x3F)
	{
		return CELL_EINVAL;
	}

	// The inbound mailbox must be empty before issuing the request
	if (spu.get_ch_count(SPU_RdInMbox))
	{
		return CELL_EBUSY;
	}

	// data1 goes to the outbound mailbox; the interrupt mailbox word carries
	// the port in its top byte and data0 in the low 24 bits
	spu.set_ch_value(SPU_WrOutMbox, data1);
	spu.set_ch_value(SPU_WrOutIntrMbox, (spup << 24) | (data0 & 0x00FFFFFF));

	// The result code comes back through the inbound mailbox
	u32 result = 0;
	spu.get_ch_value(SPU_RdInMbox, result);
	return result;
}
// HLE for the SPU-side sys_spu_thread_switch_system_module: flush DMA, then
// repeatedly issue stop code 0x120 with the given status until the system
// stops replying CELL_EBUSY. Returns the final result code.
s32 sys_spu_thread_switch_system_module(SPUThread& spu, u32 status)
{
	// The inbound mailbox must be empty before issuing the request
	if (spu.get_ch_count(SPU_RdInMbox))
	{
		return CELL_EBUSY;
	}

	u32 result;

	// Cancel any pending tag status update request, then drain the stale status
	spu.set_ch_value(MFC_WrTagUpdate, 0);
	while (spu.get_ch_count(MFC_RdTagStat) != 1);
	spu.get_ch_value(MFC_RdTagStat, result);

	// Block until every DMA tag group has completed
	spu.set_ch_value(MFC_WrTagMask, 0xFFFFFFFF);
	spu.set_ch_value(MFC_WrTagUpdate, MFC_TAG_UPDATE_ALL);
	spu.get_ch_value(MFC_RdTagStat, result);

	// Retry the request while the system reports busy
	for (;;)
	{
		spu.set_ch_value(SPU_WrOutMbox, status);
		spu.stop_and_signal(0x120);
		spu.get_ch_value(SPU_RdInMbox, result);

		if (result != CELL_EBUSY)
		{
			break;
		}
	}

	return result;
}
//----------------------------------------------------------------------------
// SPURS kernel functions
//----------------------------------------------------------------------------
@ -1643,8 +1726,9 @@ s32 spursTasketSaveTaskContext(SPUThread& spu)
v128 r;
spu.fpscr.Read(r);
ctxt->savedContextFpscr = r;
ctxt->savedSpuWriteEventMask = spu.get_ch_value(SPU_RdEventMask);
ctxt->savedWriteTagGroupQueryMask = spu.get_ch_value(MFC_RdTagMask);
u32 r32;
spu.get_ch_value(SPU_RdEventMask, r32); ctxt->savedSpuWriteEventMask = r32;
spu.get_ch_value(MFC_RdTagMask, r32); ctxt->savedWriteTagGroupQueryMask = r32;
// Store the processor context
const u32 contextSaveStorage = vm::cast(taskInfo->context_save_storage_and_alloc_ls_blocks & -0x80, HERE);

View file

@ -44,16 +44,7 @@ bool RawSPUThread::read_reg(const u32 addr, u32& value)
case SPU_Out_MBox_offs:
{
value = ch_out_mbox.pop();
if (ch_out_mbox.notification_required)
{
// lock for reliable notification
std::lock_guard<std::mutex> lock(mutex);
cv.notify_one();
}
value = ch_out_mbox.pop(*this);
return true;
}
@ -173,14 +164,7 @@ bool RawSPUThread::write_reg(const u32 addr, const u32 value)
case SPU_In_MBox_offs:
{
if (ch_in_mbox.push(value))
{
// lock for reliable notification
std::lock_guard<std::mutex> lock(mutex);
cv.notify_one();
}
ch_in_mbox.push(*this, value);
return true;
}

View file

@ -33,7 +33,10 @@ void spu_interpreter::set_interrupt_status(SPUThread& spu, spu_opcode_t op)
void spu_interpreter::STOP(SPUThread& spu, spu_opcode_t op)
{
spu.stop_and_signal(op.opcode & 0x3fff);
if (!spu.stop_and_signal(op.opcode & 0x3fff))
{
spu.pc -= 4;
}
}
void spu_interpreter::LNOP(SPUThread& spu, spu_opcode_t op)
@ -59,7 +62,10 @@ void spu_interpreter::MFSPR(SPUThread& spu, spu_opcode_t op)
void spu_interpreter::RDCH(SPUThread& spu, spu_opcode_t op)
{
spu.gpr[op.rt] = v128::from32r(spu.get_ch_value(op.ra));
if (!spu.get_ch_value(op.ra, spu.gpr[op.rt]._u32[3]))
{
spu.pc -= 4;
}
}
void spu_interpreter::RCHCNT(SPUThread& spu, spu_opcode_t op)
@ -276,7 +282,10 @@ void spu_interpreter::MTSPR(SPUThread& spu, spu_opcode_t op)
void spu_interpreter::WRCH(SPUThread& spu, spu_opcode_t op)
{
spu.set_ch_value(op.ra, spu.gpr[op.rt]._u32[3]);
if (!spu.set_ch_value(op.ra, spu.gpr[op.rt]._u32[3]))
{
spu.pc -= 4;
}
}
void spu_interpreter::BIZ(SPUThread& spu, spu_opcode_t op)

View file

@ -39,8 +39,6 @@ cfg::map_entry<spu_decoder_type> g_cfg_spu_decoder(cfg::root.core, "SPU Decoder"
const spu_decoder<spu_interpreter_precise> s_spu_interpreter_precise;
const spu_decoder<spu_interpreter_fast> s_spu_interpreter_fast;
thread_local bool spu_channel_t::notification_required;
void spu_int_ctrl_t::set(u64 ints)
{
// leave only enabled interrupts
@ -185,27 +183,17 @@ SPUThread::~SPUThread()
void SPUThread::push_snr(u32 number, u32 value)
{
// get channel
const auto channel =
number == 0 ? &ch_snr1 :
number == 1 ? &ch_snr2 : throw EXCEPTION("Unexpected");
// Get channel
const auto channel = number & 1 ? &ch_snr2 : &ch_snr1;
// check corresponding SNR register settings
// Check corresponding SNR register settings
if ((snr_config >> number) & 1)
{
channel->push_or(value);
channel->push_or(*this, value);
}
else
{
channel->push(value);
}
if (channel->notification_required)
{
// lock for reliable notification
std::lock_guard<std::mutex> lock(mutex);
cv.notify_one();
channel->push(*this, value);
}
}
@ -365,13 +353,11 @@ void SPUThread::process_mfc_cmd(u32 cmd)
vm::reservation_acquire(vm::base(offset + ch_mfc_args.lsa), raddr, 128);
if (last_raddr)
if (std::exchange(last_raddr, raddr))
{
ch_event_stat |= SPU_EVENT_LR;
}
last_raddr = raddr;
return ch_atomic_stat.set_value(MFC_GETLLAR_SUCCESS);
}
@ -384,22 +370,18 @@ void SPUThread::process_mfc_cmd(u32 cmd)
if (vm::reservation_update(vm::cast(ch_mfc_args.ea, HERE), vm::base(offset + ch_mfc_args.lsa), 128))
{
if (last_raddr == 0)
if (std::exchange(last_raddr, 0) == 0)
{
throw EXCEPTION("Unexpected: PUTLLC command succeeded, but GETLLAR command not detected");
throw std::runtime_error("PUTLLC succeeded without GETLLAR" HERE);
}
last_raddr = 0;
return ch_atomic_stat.set_value(MFC_PUTLLC_SUCCESS);
}
else
{
if (last_raddr != 0)
if (std::exchange(last_raddr, 0))
{
ch_event_stat |= SPU_EVENT_LR;
last_raddr = 0;
}
return ch_atomic_stat.set_value(MFC_PUTLLC_FAILURE);
@ -437,8 +419,7 @@ void SPUThread::process_mfc_cmd(u32 cmd)
case MFC_BARRIER_CMD:
case MFC_EIEIO_CMD:
case MFC_SYNC_CMD:
LOG_WARNING(SPU, "process_mfc_cmd: Sync channel '%s' ignored. (cmd=0x%x, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x)",
get_mfc_cmd_name(cmd), cmd, ch_mfc_args.lsa, ch_mfc_args.ea, ch_mfc_args.tag, ch_mfc_args.size);
_mm_mfence();
return;
}
@ -543,31 +524,26 @@ u32 SPUThread::get_ch_count(u32 ch)
throw EXCEPTION("Unknown/illegal channel (ch=%d [%s])", ch, ch < 128 ? spu_ch_name[ch] : "???");
}
u32 SPUThread::get_ch_value(u32 ch)
bool SPUThread::get_ch_value(u32 ch, u32& out)
{
LOG_TRACE(SPU, "get_ch_value(ch=%d [%s])", ch, ch < 128 ? spu_ch_name[ch] : "???");
auto read_channel = [this](spu_channel_t& channel) -> u32
auto read_channel = [&](spu_channel_t& channel)
{
std::unique_lock<std::mutex> lock(mutex, std::defer_lock);
while (true)
{
bool result;
u32 value;
std::tie(result, value) = channel.try_pop();
if (result)
if (channel.try_pop(out))
{
return value;
return true;
}
CHECK_EMU_STATUS;
if (state & cpu_state::stop)
{
throw cpu_state::stop;
return false;
}
if (!lock)
@ -591,27 +567,21 @@ u32 SPUThread::get_ch_value(u32 ch)
while (true)
{
bool result;
u32 value;
u32 count;
std::tie(result, value, count) = ch_in_mbox.try_pop();
if (result)
if (const uint old_count = ch_in_mbox.try_pop(out))
{
if (count + 1 == 4 /* SPU_IN_MBOX_THRESHOLD */) // TODO: check this
if (old_count == 4 /* SPU_IN_MBOX_THRESHOLD */) // TODO: check this
{
int_ctrl[2].set(SPU_INT2_STAT_SPU_MAILBOX_THRESHOLD_INT);
}
return value;
return true;
}
CHECK_EMU_STATUS;
if (state & cpu_state::stop)
{
throw cpu_state::stop;
return false;
}
if (!lock)
@ -631,7 +601,8 @@ u32 SPUThread::get_ch_value(u32 ch)
case MFC_RdTagMask:
{
return ch_tag_mask;
out = ch_tag_mask;
return true;
}
case SPU_RdSigNotify1:
@ -656,12 +627,14 @@ u32 SPUThread::get_ch_value(u32 ch)
case SPU_RdDec:
{
return ch_dec_value - (u32)(get_timebased_time() - ch_dec_start_timestamp);
out = ch_dec_value - (u32)(get_timebased_time() - ch_dec_start_timestamp);
return true;
}
case SPU_RdEventMask:
{
return ch_event_mask;
out = ch_event_mask;
return true;
}
case SPU_RdEventStat:
@ -671,7 +644,8 @@ u32 SPUThread::get_ch_value(u32 ch)
// start waiting or return immediately
if (u32 res = get_events(true))
{
return res;
out = res;
return true;
}
if (ch_event_mask & SPU_EVENT_LR)
@ -696,24 +670,26 @@ u32 SPUThread::get_ch_value(u32 ch)
if (state & cpu_state::stop)
{
throw cpu_state::stop;
return false;
}
return get_events();
out = get_events();
return true;
}
case SPU_RdMachStat:
{
// HACK: "Not isolated" status
// Return SPU Interrupt status in LSB
return (ch_event_stat & SPU_EVENT_INTR_ENABLED) != 0;
out = (ch_event_stat & SPU_EVENT_INTR_ENABLED) != 0;
return true;
}
}
throw EXCEPTION("Unknown/illegal channel (ch=%d [%s])", ch, ch < 128 ? spu_ch_name[ch] : "???");
}
void SPUThread::set_ch_value(u32 ch, u32 value)
bool SPUThread::set_ch_value(u32 ch, u32 value)
{
LOG_TRACE(SPU, "set_ch_value(ch=%d [%s], value=0x%x)", ch, ch < 128 ? spu_ch_name[ch] : "???", value);
@ -734,7 +710,7 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
if (state & cpu_state::stop)
{
throw cpu_state::stop;
return false;
}
if (!lock)
@ -747,7 +723,7 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
}
int_ctrl[2].set(SPU_INT2_STAT_MAILBOX_INT);
return;
return true;
}
else
{
@ -781,17 +757,17 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
if (!queue)
{
LOG_WARNING(SPU, "sys_spu_thread_send_event(spup=%d, data0=0x%x, data1=0x%x): event queue not connected", spup, (value & 0x00ffffff), data);
return ch_in_mbox.set_values(1, CELL_ENOTCONN); // TODO: check error passing
return ch_in_mbox.set_values(1, CELL_ENOTCONN), true; // TODO: check error passing
}
if (queue->events() >= queue->size)
{
return ch_in_mbox.set_values(1, CELL_EBUSY);
return ch_in_mbox.set_values(1, CELL_EBUSY), true;
}
queue->push(lv2_lock, SYS_SPU_THREAD_EVENT_USER_KEY, id, ((u64)spup << 32) | (value & 0x00ffffff), data);
return ch_in_mbox.set_values(1, CELL_OK);
return ch_in_mbox.set_values(1, CELL_OK), true;
}
else if (code < 128)
{
@ -817,18 +793,18 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
if (!queue)
{
LOG_WARNING(SPU, "sys_spu_thread_throw_event(spup=%d, data0=0x%x, data1=0x%x): event queue not connected", spup, (value & 0x00ffffff), data);
return;
return true;
}
// TODO: check passing spup value
if (queue->events() >= queue->size)
{
LOG_WARNING(SPU, "sys_spu_thread_throw_event(spup=%d, data0=0x%x, data1=0x%x) failed (queue is full)", spup, (value & 0x00ffffff), data);
return;
return true;
}
queue->push(lv2_lock, SYS_SPU_THREAD_EVENT_USER_KEY, id, ((u64)spup << 32) | (value & 0x00ffffff), data);
return;
return true;
}
else if (code == 128)
{
@ -863,7 +839,7 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
if (!eflag)
{
return ch_in_mbox.set_values(1, CELL_ESRCH);
return ch_in_mbox.set_values(1, CELL_ESRCH), true;
}
const u64 bitptn = 1ull << flag;
@ -874,7 +850,7 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
eflag->notify_all(lv2_lock);
}
return ch_in_mbox.set_values(1, CELL_OK);
return ch_in_mbox.set_values(1, CELL_OK), true;
}
else if (code == 192)
{
@ -904,7 +880,7 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
if (!eflag)
{
return;
return true;
}
const u64 bitptn = 1ull << flag;
@ -915,7 +891,7 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
eflag->notify_all(lv2_lock);
}
return;
return true;
}
else
{
@ -941,7 +917,7 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
if (state & cpu_state::stop)
{
throw cpu_state::stop;
return false;
}
if (!lock)
@ -953,19 +929,19 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
cv.wait(lock);
}
return;
return true;
}
case MFC_WrTagMask:
{
ch_tag_mask = value;
return;
return true;
}
case MFC_WrTagUpdate:
{
ch_tag_stat.set_value(ch_tag_mask); // hack
return;
return true;
}
case MFC_LSA:
@ -976,19 +952,19 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
}
ch_mfc_args.lsa = value;
return;
return true;
}
case MFC_EAH:
{
ch_mfc_args.eah = value;
return;
return true;
}
case MFC_EAL:
{
ch_mfc_args.eal = value;
return;
return true;
}
case MFC_Size:
@ -999,7 +975,7 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
}
ch_mfc_args.size = (u16)value;
return;
return true;
}
case MFC_TagID:
@ -1010,14 +986,14 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
}
ch_mfc_args.tag = (u16)value;
return;
return true;
}
case MFC_Cmd:
{
process_mfc_cmd(value);
ch_mfc_args = {}; // clear non-persistent data
return;
return true;
}
case MFC_WrListStallAck:
@ -1052,14 +1028,14 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
}
}
return;
return true;
}
case SPU_WrDec:
{
ch_dec_start_timestamp = get_timebased_time();
ch_dec_value = value;
return;
return true;
}
case SPU_WrEventMask:
@ -1077,7 +1053,7 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
}
ch_event_mask = value;
return;
return true;
}
case SPU_WrEventAck:
@ -1088,14 +1064,14 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
}
ch_event_stat &= ~value;
return;
return true;
}
}
throw EXCEPTION("Unknown/illegal channel (ch=%d [%s], value=0x%x)", ch, ch < 128 ? spu_ch_name[ch] : "???", value);
}
void SPUThread::stop_and_signal(u32 code)
bool SPUThread::stop_and_signal(u32 code)
{
LOG_TRACE(SPU, "stop_and_signal(code=0x%x)", code);
@ -1109,7 +1085,8 @@ void SPUThread::stop_and_signal(u32 code)
});
int_ctrl[2].set(SPU_INT2_STAT_SPU_STOP_AND_SIGNAL_INT);
throw cpu_state::stop;
state += cpu_state::stop;
return true; // ???
}
switch (code)
@ -1117,13 +1094,13 @@ void SPUThread::stop_and_signal(u32 code)
case 0x001:
{
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
return;
return true;
}
case 0x002:
{
state += cpu_state::ret;
return;
return true;
}
case 0x003:
@ -1140,7 +1117,7 @@ void SPUThread::stop_and_signal(u32 code)
pc = (gpr[0]._u32[3] & 0x3fffc) - 4;
}
return;
return true;
}
case 0x110:
@ -1157,7 +1134,7 @@ void SPUThread::stop_and_signal(u32 code)
if (u32 count = ch_in_mbox.get_count())
{
LOG_ERROR(SPU, "sys_spu_thread_receive_event(): In_MBox is not empty (%d)", count);
return ch_in_mbox.set_values(1, CELL_EBUSY);
return ch_in_mbox.set_values(1, CELL_EBUSY), true;
}
const u32 spuq = ch_out_mbox.get_value();
@ -1175,7 +1152,7 @@ void SPUThread::stop_and_signal(u32 code)
if (group->type & SYS_SPU_THREAD_GROUP_TYPE_EXCLUSIVE_NON_CONTEXT) // this check may be inaccurate
{
return ch_in_mbox.set_values(1, CELL_EINVAL);
return ch_in_mbox.set_values(1, CELL_EINVAL), true;
}
std::shared_ptr<lv2_event_queue_t> queue;
@ -1195,7 +1172,7 @@ void SPUThread::stop_and_signal(u32 code)
if (!queue)
{
return ch_in_mbox.set_values(1, CELL_EINVAL); // TODO: check error value
return ch_in_mbox.set_values(1, CELL_EINVAL), true; // TODO: check error value
}
// check thread group status
@ -1205,7 +1182,7 @@ void SPUThread::stop_and_signal(u32 code)
if (state & cpu_state::stop)
{
throw cpu_state::stop;
return false;
}
group->cv.wait_for(lv2_lock, std::chrono::milliseconds(1));
@ -1246,7 +1223,7 @@ void SPUThread::stop_and_signal(u32 code)
if (state & cpu_state::stop)
{
throw cpu_state::stop;
return false;
}
cv.wait(lv2_lock);
@ -1281,7 +1258,7 @@ void SPUThread::stop_and_signal(u32 code)
state -= cpu_state::suspend;
group->cv.notify_all();
return;
return true;
}
case 0x101:
@ -1322,7 +1299,8 @@ void SPUThread::stop_and_signal(u32 code)
group->join_state |= SPU_TGJSF_GROUP_EXIT;
group->cv.notify_one();
throw cpu_state::stop;
state += cpu_state::stop;
return true;
}
case 0x102:
@ -1348,7 +1326,8 @@ void SPUThread::stop_and_signal(u32 code)
status |= SPU_STATUS_STOPPED_BY_STOP;
group->cv.notify_one();
throw cpu_state::stop;
state += cpu_state::stop;
return true;
}
}

View file

@ -145,9 +145,6 @@ enum : u32
struct spu_channel_t
{
// set to true if SPU thread must be notified after SPU channel operation
thread_local static bool notification_required;
struct alignas(8) sync_var_t
{
bool count; // value available
@ -174,7 +171,7 @@ public:
}
// push performing bitwise OR with previous value, may require notification
void push_or(u32 value)
void push_or(cpu_thread& spu, u32 value)
{
const auto old = data.fetch_op([=](sync_var_t& data)
{
@ -183,11 +180,11 @@ public:
data.value |= value;
});
notification_required = old.wait;
if (old.wait) spu.safe_notify();
}
// push unconditionally (overwriting previous value), may require notification
void push(u32 value)
void push(cpu_thread& spu, u32 value)
{
const auto old = data.fetch_op([=](sync_var_t& data)
{
@ -196,24 +193,33 @@ public:
data.value = value;
});
notification_required = old.wait;
if (old.wait) spu.safe_notify();
}
// returns true on success and loaded value
std::tuple<bool, u32> try_pop()
// returns true on success
bool try_pop(u32& out)
{
const auto old = data.fetch_op([](sync_var_t& data)
const auto old = data.fetch_op([&](sync_var_t& data)
{
data.wait = !data.count;
if (data.count)
{
data.wait = false;
out = data.value;
}
else
{
data.wait = true;
}
data.count = false;
data.value = 0; // ???
});
return std::tie(old.count, old.value);
return old.count;
}
// pop unconditionally (loading last value), may require notification
u32 pop()
u32 pop(cpu_thread& spu)
{
const auto old = data.fetch_op([](sync_var_t& data)
{
@ -222,7 +228,7 @@ public:
// value is not cleared and may be read again
});
notification_required = old.wait;
if (old.wait) spu.safe_notify();
return old.value;
}
@ -269,11 +275,11 @@ public:
}
// push unconditionally (overwriting latest value), returns true if needs signaling
bool push(u32 value)
void push(cpu_thread& spu, u32 value)
{
value3 = value; _mm_sfence();
return values.atomic_op([=](sync_var_t& data) -> bool
if (values.atomic_op([=](sync_var_t& data) -> bool
{
switch (data.count++)
{
@ -291,20 +297,24 @@ public:
}
return false;
});
}))
{
spu.safe_notify();
}
}
// returns true on success and two u32 values: data and count after removing the first element
std::tuple<bool, u32, u32> try_pop()
// returns non-zero value on success: queue size before removal
uint try_pop(u32& out)
{
return values.atomic_op([this](sync_var_t& data)
return values.atomic_op([&](sync_var_t& data)
{
const auto result = std::make_tuple(data.count != 0, u32{ data.value0 }, u32{ data.count - 1u });
const uint result = data.count;
if (data.count != 0)
if (result != 0)
{
data.waiting = 0;
data.count--;
out = data.value0;
data.value0 = data.value1;
data.value1 = data.value2;
@ -620,10 +630,9 @@ public:
void set_events(u32 mask);
void set_interrupt_status(bool enable);
u32 get_ch_count(u32 ch);
u32 get_ch_value(u32 ch);
void set_ch_value(u32 ch, u32 value);
void stop_and_signal(u32 code);
bool get_ch_value(u32 ch, u32& out);
bool set_ch_value(u32 ch, u32 value);
bool stop_and_signal(u32 code);
void halt();
void fast_call(u32 ls_addr);

View file

@ -203,12 +203,7 @@ s32 sys_spu_thread_get_exit_status(u32 id, vm::ptr<u32> status)
// TODO: check CELL_ESTAT condition
*status = thread->ch_out_mbox.pop();
if (thread->ch_out_mbox.notification_required)
{
throw EXCEPTION("Unexpected");
}
*status = thread->ch_out_mbox.pop(*thread);
return CELL_OK;
}
@ -713,13 +708,7 @@ s32 sys_spu_thread_write_spu_mb(u32 id, u32 value)
return CELL_ESTAT;
}
if (thread->ch_in_mbox.push(value))
{
// lock for reliable notification
std::lock_guard<std::mutex> lock(thread->mutex);
thread->cv.notify_one();
}
thread->ch_in_mbox.push(*thread, value);
return CELL_OK;
}
@ -1330,15 +1319,7 @@ s32 sys_raw_spu_read_puint_mb(u32 id, vm::ptr<u32> value)
return CELL_ESRCH;
}
*value = thread->ch_out_intr_mbox.pop();
if (thread->ch_out_intr_mbox.notification_required)
{
// lock for reliable notification
std::lock_guard<std::mutex> lock(thread->mutex);
thread->cv.notify_one();
}
*value = thread->ch_out_intr_mbox.pop(*thread);
return CELL_OK;
}
@ -1379,82 +1360,3 @@ s32 sys_raw_spu_get_spu_cfg(u32 id, vm::ptr<u32> value)
return CELL_OK;
}
// Legacy HLE for sys_spu_thread_exit (pre out-param channel API):
// flush pending DMA, report the exit status, stop with code 0x102.
void sys_spu_thread_exit(SPUThread & spu, s32 status)
{
// Cancel any pending status update requests
spu.set_ch_value(MFC_WrTagUpdate, 0);
// Spin until the tag status channel has exactly one entry to read
while (spu.get_ch_count(MFC_RdTagStat) != 1);
// Drain the stale tag status (value intentionally discarded)
spu.get_ch_value(MFC_RdTagStat);
// Wait for all pending DMA operations to complete
spu.set_ch_value(MFC_WrTagMask, 0xFFFFFFFF);
spu.set_ch_value(MFC_WrTagUpdate, MFC_TAG_UPDATE_ALL);
spu.get_ch_value(MFC_RdTagStat);
// Publish the exit status, then raise the thread-exit stop code
spu.set_ch_value(SPU_WrOutMbox, status);
spu.stop_and_signal(0x102);
}
// Legacy HLE for sys_spu_thread_group_exit (pre out-param channel API):
// flush pending DMA, report the exit status, stop with code 0x101.
void sys_spu_thread_group_exit(SPUThread & spu, s32 status)
{
// Cancel any pending status update requests
spu.set_ch_value(MFC_WrTagUpdate, 0);
// Spin until the tag status channel has exactly one entry to read
while (spu.get_ch_count(MFC_RdTagStat) != 1);
// Drain the stale tag status (value intentionally discarded)
spu.get_ch_value(MFC_RdTagStat);
// Wait for all pending DMA operations to complete
spu.set_ch_value(MFC_WrTagMask, 0xFFFFFFFF);
spu.set_ch_value(MFC_WrTagUpdate, MFC_TAG_UPDATE_ALL);
spu.get_ch_value(MFC_RdTagStat);
// Publish the exit status, then raise the group-exit stop code
spu.set_ch_value(SPU_WrOutMbox, status);
spu.stop_and_signal(0x101);
}
// Legacy HLE for sys_spu_thread_send_event (pre out-param channel API):
// posts an event on port spup; returns the result code received via the
// inbound mailbox, or CELL_EINVAL / CELL_EBUSY on precondition failure.
s32 sys_spu_thread_send_event(SPUThread & spu, u8 spup, u32 data0, u32 data1)
{
// Only ports 0..63 are valid
if (spup > 0x3F)
{
return CELL_EINVAL;
}
// The inbound mailbox must be empty before issuing the request
if (spu.get_ch_count(SPU_RdInMbox))
{
return CELL_EBUSY;
}
// data1 goes to the outbound mailbox; the interrupt mailbox word carries
// the port in its top byte and data0 in the low 24 bits
spu.set_ch_value(SPU_WrOutMbox, data1);
spu.set_ch_value(SPU_WrOutIntrMbox, (spup << 24) | (data0 & 0x00FFFFFF));
// The result code comes back through the inbound mailbox
return spu.get_ch_value(SPU_RdInMbox);
}
// Legacy HLE for sys_spu_thread_switch_system_module (pre out-param channel
// API): flush pending DMA, then repeatedly issue stop code 0x120 with the
// given status until the reply is no longer CELL_EBUSY. Returns the final
// result code from the inbound mailbox.
s32 sys_spu_thread_switch_system_module(SPUThread & spu, u32 status)
{
// The inbound mailbox must be empty before issuing the request
if (spu.get_ch_count(SPU_RdInMbox))
{
return CELL_EBUSY;
}
// Cancel any pending status update requests
spu.set_ch_value(MFC_WrTagUpdate, 0);
// Spin until the tag status channel has exactly one entry to read
while (spu.get_ch_count(MFC_RdTagStat) != 1);
// Drain the stale tag status (value intentionally discarded)
spu.get_ch_value(MFC_RdTagStat);
// Wait for all pending DMA operations to complete
spu.set_ch_value(MFC_WrTagMask, 0xFFFFFFFF);
spu.set_ch_value(MFC_WrTagUpdate, MFC_TAG_UPDATE_ALL);
spu.get_ch_value(MFC_RdTagStat);
s32 result;
// Retry the stop-and-signal while the system replies busy
do
{
spu.set_ch_value(SPU_WrOutMbox, status);
spu.stop_and_signal(0x120);
}
while ((result = spu.get_ch_value(SPU_RdInMbox)) == CELL_EBUSY);
return result;
}

View file

@ -242,9 +242,3 @@ s32 sys_raw_spu_get_int_stat(u32 id, u32 class_id, vm::ptr<u64> stat);
s32 sys_raw_spu_read_puint_mb(u32 id, vm::ptr<u32> value);
s32 sys_raw_spu_set_spu_cfg(u32 id, u32 value);
s32 sys_raw_spu_get_spu_cfg(u32 id, vm::ptr<u32> value);
// SPU Calls
void sys_spu_thread_exit(SPUThread & spu, s32 status);
void sys_spu_thread_group_exit(SPUThread & spu, s32 status);
s32 sys_spu_thread_send_event(SPUThread & spu, u8 spup, u32 data0, u32 data1);
s32 sys_spu_thread_switch_system_module(SPUThread & spu, u32 status);