sys_spu...

Nekotekina 2017-02-05 02:26:57 +03:00
parent 68f0393cf3
commit ec943b38a2
4 changed files with 261 additions and 362 deletions

@ -52,7 +52,7 @@ void spu_int_ctrl_t::set(u64 ints)
// notify if at least 1 bit was set
if (ints && ~stat.fetch_or(ints) & ints && tag)
{
LV2_LOCK;
reader_lock rlock(id_manager::g_mutex);
if (tag)
{
@ -246,14 +246,16 @@ SPUThread::SPUThread(const std::string& name)
, m_name(name)
, index(0)
, offset(0)
, group(nullptr)
{
}
SPUThread::SPUThread(const std::string& name, u32 index)
SPUThread::SPUThread(const std::string& name, u32 index, lv2_spu_group* group)
: cpu_thread(idm::last_id())
, m_name(name)
, index(index)
, offset(verify("SPU LS" HERE, vm::alloc(0x40000, vm::main)))
, group(group)
{
}
@ -287,8 +289,6 @@ void SPUThread::do_dma_transfer(u32 cmd, spu_mfc_arg_t args)
const u32 index = (eal - SYS_SPU_THREAD_BASE_LOW) / SYS_SPU_THREAD_OFFSET; // thread number in group
const u32 offset = (eal - SYS_SPU_THREAD_BASE_LOW) % SYS_SPU_THREAD_OFFSET; // LS offset or MMIO register
const auto group = tg.lock();
if (group && index < group->num && group->threads[index])
{
auto& spu = static_cast<SPUThread&>(*group->threads[index]);
@ -630,8 +630,6 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
return true;
}
CHECK_EMU_STATUS;
if (test(state & cpu_flag::stop))
{
return false;
@ -703,8 +701,6 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
// simple waiting loop otherwise
while (!get_events(true) && !test(state & cpu_flag::stop))
{
CHECK_EMU_STATUS;
thread_ctrl::wait();
}
}
@ -747,8 +743,6 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
{
while (!ch_out_intr_mbox.try_push(value))
{
CHECK_EMU_STATUS;
if (test(state & cpu_flag::stop))
{
return false;
@ -760,18 +754,17 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
int_ctrl[2].set(SPU_INT2_STAT_MAILBOX_INT);
return true;
}
else
const u32 code = value >> 24;
{
const u8 code = value >> 24;
if (code < 64)
{
/* ===== sys_spu_thread_send_event (used by spu_printf) ===== */
LV2_LOCK;
u32 spup = code & 63;
u32 data;
const u8 spup = code & 63;
if (!ch_out_mbox.get_count())
if (!ch_out_mbox.try_pop(data))
{
fmt::throw_exception("sys_spu_thread_send_event(value=0x%x, spup=%d): Out_MBox is empty" HERE, value, spup);
}
@ -781,13 +774,9 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
fmt::throw_exception("sys_spu_thread_send_event(value=0x%x, spup=%d): In_MBox is not empty (count=%d)" HERE, value, spup, count);
}
const u32 data = ch_out_mbox.get_value();
ch_out_mbox.set_value(data, 0);
LOG_TRACE(SPU, "sys_spu_thread_send_event(spup=%d, data0=0x%x, data1=0x%x)", spup, value & 0x00ffffff, data);
const auto queue = this->spup[spup].lock();
const auto queue = (semaphore_lock{group->mutex}, this->spup[spup].lock());
if (!queue)
{
@ -806,22 +795,17 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
{
/* ===== sys_spu_thread_throw_event ===== */
LV2_LOCK;
u32 spup = code & 63;
u32 data;
const u8 spup = code & 63;
if (!ch_out_mbox.get_count())
if (!ch_out_mbox.try_pop(data))
{
fmt::throw_exception("sys_spu_thread_throw_event(value=0x%x, spup=%d): Out_MBox is empty" HERE, value, spup);
}
const u32 data = ch_out_mbox.get_value();
ch_out_mbox.set_value(data, 0);
LOG_TRACE(SPU, "sys_spu_thread_throw_event(spup=%d, data0=0x%x, data1=0x%x)", spup, value & 0x00ffffff, data);
const auto queue = this->spup[spup].lock();
const auto queue = (semaphore_lock{group->mutex}, this->spup[spup].lock());
if (!queue)
{
@ -841,11 +825,10 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
{
/* ===== sys_event_flag_set_bit ===== */
LV2_LOCK;
u32 flag = value & 0xffffff;
u32 data;
const u32 flag = value & 0xffffff;
if (!ch_out_mbox.get_count())
if (!ch_out_mbox.try_pop(data))
{
fmt::throw_exception("sys_event_flag_set_bit(value=0x%x (flag=%d)): Out_MBox is empty" HERE, value, flag);
}
@ -855,10 +838,6 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
fmt::throw_exception("sys_event_flag_set_bit(value=0x%x (flag=%d)): In_MBox is not empty (%d)" HERE, value, flag, count);
}
const u32 data = ch_out_mbox.get_value();
ch_out_mbox.set_value(data, 0);
if (flag > 63)
{
fmt::throw_exception("sys_event_flag_set_bit(id=%d, value=0x%x (flag=%d)): Invalid flag" HERE, data, value, flag);
@ -873,19 +852,14 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
{
/* ===== sys_event_flag_set_bit_impatient ===== */
LV2_LOCK;
u32 flag = value & 0xffffff;
u32 data;
const u32 flag = value & 0xffffff;
if (!ch_out_mbox.get_count())
if (!ch_out_mbox.try_pop(data))
{
fmt::throw_exception("sys_event_flag_set_bit_impatient(value=0x%x (flag=%d)): Out_MBox is empty" HERE, value, flag);
}
const u32 data = ch_out_mbox.get_value();
ch_out_mbox.set_value(data, 0);
if (flag > 63)
{
fmt::throw_exception("sys_event_flag_set_bit_impatient(id=%d, value=0x%x (flag=%d)): Invalid flag" HERE, data, value, flag);
@ -915,8 +889,6 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
{
while (!ch_out_mbox.try_push(value))
{
CHECK_EMU_STATUS;
if (test(state & cpu_flag::stop))
{
return false;
@ -1120,9 +1092,9 @@ bool SPUThread::stop_and_signal(u32 code)
{
/* ===== sys_spu_thread_receive_event ===== */
LV2_LOCK;
u32 spuq;
if (!ch_out_mbox.get_count())
if (!ch_out_mbox.try_pop(spuq))
{
fmt::throw_exception("sys_spu_thread_receive_event(): Out_MBox is empty" HERE);
}
@ -1133,19 +1105,8 @@ bool SPUThread::stop_and_signal(u32 code)
return ch_in_mbox.set_values(1, CELL_EBUSY), true;
}
const u32 spuq = ch_out_mbox.get_value();
ch_out_mbox.set_value(spuq, 0);
LOG_TRACE(SPU, "sys_spu_thread_receive_event(spuq=0x%x)", spuq);
const auto group = tg.lock();
if (!group)
{
fmt::throw_exception("Invalid SPU Thread Group" HERE);
}
if (group->type & SYS_SPU_THREAD_GROUP_TYPE_EXCLUSIVE_NON_CONTEXT) // this check may be inaccurate
{
return ch_in_mbox.set_values(1, CELL_EINVAL), true;
@ -1153,113 +1114,121 @@ bool SPUThread::stop_and_signal(u32 code)
std::shared_ptr<lv2_event_queue> queue;
for (auto& v : this->spuq)
while (true)
{
if (spuq == v.first)
{
queue = v.second.lock();
queue.reset();
if (queue)
// Check group status, wait if necessary
while (group->run_state >= SPU_THREAD_GROUP_STATUS_WAITING && group->run_state <= SPU_THREAD_GROUP_STATUS_SUSPENDED)
{
if (test(state & cpu_flag::stop))
{
break;
return false;
}
thread_ctrl::wait();
}
reader_lock rlock(id_manager::g_mutex);
semaphore_lock lock(group->mutex);
if (group->run_state >= SPU_THREAD_GROUP_STATUS_WAITING && group->run_state <= SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED)
{
// Try again
continue;
}
for (auto& v : this->spuq)
{
if (spuq == v.first)
{
queue = v.second.lock();
if (queue)
{
break;
}
}
}
}
if (!queue)
{
return ch_in_mbox.set_values(1, CELL_EINVAL), true; // TODO: check error value
}
// check thread group status
while (group->state >= SPU_THREAD_GROUP_STATUS_WAITING && group->state <= SPU_THREAD_GROUP_STATUS_SUSPENDED)
{
CHECK_EMU_STATUS;
if (test(state & cpu_flag::stop))
if (!queue)
{
return false;
return ch_in_mbox.set_values(1, CELL_EINVAL), true; // TODO: check error value
}
group->cv.wait(lv2_lock, 1000);
}
// change group status
if (group->state == SPU_THREAD_GROUP_STATUS_RUNNING)
{
group->state = SPU_THREAD_GROUP_STATUS_WAITING;
for (auto& thread : group->threads)
{
if (thread)
{
thread->state += cpu_flag::suspend;
}
}
}
else
{
fmt::throw_exception("Unexpected SPU Thread Group state (%d)" HERE, (u32)group->state);
}
{
semaphore_lock lock(queue->mutex);
semaphore_lock qlock(queue->mutex);
if (queue->events.empty())
{
queue->sq.emplace_back(this);
group->run_state = SPU_THREAD_GROUP_STATUS_WAITING;
for (auto& thread : group->threads)
{
if (thread)
{
thread->state += cpu_flag::suspend;
}
}
// Wait
break;
}
else
{
// Return the event immediately
const auto event = queue->events.front();
const auto data1 = static_cast<u32>(std::get<1>(event));
const auto data2 = static_cast<u32>(std::get<2>(event));
const auto data3 = static_cast<u32>(std::get<3>(event));
ch_in_mbox.set_values(4, CELL_OK, data1, data2, data3);
queue->events.pop_front();
state += cpu_flag::signal;
return true;
}
}
// wait on the event queue
while (!state.test_and_reset(cpu_flag::signal))
while (true)
{
CHECK_EMU_STATUS;
if (test(state & cpu_flag::stop))
{
return false;
}
LV2_UNLOCK, thread_ctrl::wait();
if (!state.test_and_reset(cpu_flag::signal))
{
thread_ctrl::wait();
}
else
{
break;
}
}
semaphore_lock lock(group->mutex);
// restore thread group status
if (group->state == SPU_THREAD_GROUP_STATUS_WAITING)
if (group->run_state == SPU_THREAD_GROUP_STATUS_WAITING)
{
group->state = SPU_THREAD_GROUP_STATUS_RUNNING;
group->run_state = SPU_THREAD_GROUP_STATUS_RUNNING;
}
else if (group->state == SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED)
else if (group->run_state == SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED)
{
group->state = SPU_THREAD_GROUP_STATUS_SUSPENDED;
}
else
{
fmt::throw_exception("Unexpected SPU Thread Group state (%d)" HERE, (u32)group->state);
group->run_state = SPU_THREAD_GROUP_STATUS_SUSPENDED;
}
for (auto& thread : group->threads)
{
if (thread && thread.get() != this)
if (thread)
{
thread->state -= cpu_flag::suspend;
thread->notify();
if (thread.get() != this)
{
thread->notify();
}
}
}
state -= cpu_flag::suspend;
group->cv.notify_all();
return true;
}
@ -1267,25 +1236,16 @@ bool SPUThread::stop_and_signal(u32 code)
{
/* ===== sys_spu_thread_group_exit ===== */
LV2_LOCK;
u32 value;
if (!ch_out_mbox.get_count())
if (!ch_out_mbox.try_pop(value))
{
fmt::throw_exception("sys_spu_thread_group_exit(): Out_MBox is empty" HERE);
}
const u32 value = ch_out_mbox.get_value();
ch_out_mbox.set_value(value, 0);
LOG_TRACE(SPU, "sys_spu_thread_group_exit(status=0x%x)", value);
const auto group = tg.lock();
if (!group)
{
fmt::throw_exception("Invalid SPU Thread Group" HERE);
}
semaphore_lock lock(group->mutex);
for (auto& thread : group->threads)
{
@ -1296,7 +1256,7 @@ bool SPUThread::stop_and_signal(u32 code)
}
}
group->state = SPU_THREAD_GROUP_STATUS_INITIALIZED;
group->run_state = SPU_THREAD_GROUP_STATUS_INITIALIZED;
group->exit_status = value;
group->join_state |= SPU_TGJSF_GROUP_EXIT;
group->cv.notify_one();
@ -1309,8 +1269,6 @@ bool SPUThread::stop_and_signal(u32 code)
{
/* ===== sys_spu_thread_exit ===== */
LV2_LOCK;
if (!ch_out_mbox.get_count())
{
fmt::throw_exception("sys_spu_thread_exit(): Out_MBox is empty" HERE);
@ -1318,12 +1276,7 @@ bool SPUThread::stop_and_signal(u32 code)
LOG_TRACE(SPU, "sys_spu_thread_exit(status=0x%x)", ch_out_mbox.get_value());
const auto group = tg.lock();
if (!group)
{
fmt::throw_exception("Invalid SPU Thread Group" HERE);
}
semaphore_lock lock(group->mutex);
status |= SPU_STATUS_STOPPED_BY_STOP;
group->cv.notify_one();
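
The recurring change above, where ch_out_mbox.get_count() followed by get_value()/set_value(value, 0) is collapsed into a single ch_out_mbox.try_pop(data) call, amounts to treating the Out_MBox as an atomic single-slot mailbox. Below is a minimal sketch of such a channel using std::atomic; the class and member names are illustrative only and are not RPCS3's actual spu_channel implementation.

#include <atomic>
#include <cstdint>

// Illustrative single-slot mailbox: the high bit of the packed 64-bit state
// marks the slot as occupied, the low 32 bits carry the value.
class single_slot_channel
{
	static constexpr std::uint64_t occupied = 1ull << 32;

	std::atomic<std::uint64_t> m_state{0};

public:
	// Fails (returns false) if the slot is already full; the caller may wait and retry.
	bool try_push(std::uint32_t value)
	{
		std::uint64_t expected = 0;
		return m_state.compare_exchange_strong(expected, occupied | value);
	}

	// Pops the current value atomically; fails if the slot is empty.
	bool try_pop(std::uint32_t& out)
	{
		const std::uint64_t old = m_state.exchange(0);

		if (!(old & occupied))
		{
			return false;
		}

		out = static_cast<std::uint32_t>(old);
		return true;
	}
};

Both operations are single atomic read-modify-write steps, which is what lets the surrounding LV2_LOCK be dropped around these mailbox accesses.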

@ -505,7 +505,7 @@ public:
static const u32 id_step = 1;
static const u32 id_count = 2048;
SPUThread(const std::string& name, u32 index);
SPUThread(const std::string& name, u32 index, lv2_spu_group* group);
std::array<v128, 128> gpr; // General-Purpose Registers
SPU_FPSCR fpscr;
@ -544,14 +544,13 @@ public:
std::array<spu_int_ctrl_t, 3> int_ctrl; // SPU Class 0, 1, 2 Interrupt Management
std::weak_ptr<lv2_spu_group> tg; // SPU Thread Group
std::array<std::pair<u32, std::weak_ptr<lv2_event_queue>>, 32> spuq; // Event Queue Keys for SPU Thread
std::weak_ptr<lv2_event_queue> spup[64]; // SPU Ports
u32 pc = 0; //
const u32 index; // SPU index
const u32 offset; // SPU LS offset
lv2_spu_group* const group; // SPU Thread Group
const std::string m_name; // Thread name
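
Replacing the std::weak_ptr<lv2_spu_group> tg member with a plain lv2_spu_group* const group back-pointer relies on the group outliving its threads, so every tg.lock() plus null-check pair can go away. A stripped-down sketch of that ownership shape (type names here are placeholders, not the emulator's):

#include <memory>
#include <vector>

struct group_t;

struct thread_t
{
	group_t* const group; // non-owning back-pointer; the group outlives its threads

	explicit thread_t(group_t* g)
		: group(g)
	{
	}
};

struct group_t
{
	// The group is the sole owner of its threads, so handing them `this`
	// as a raw pointer is safe for the threads' entire lifetime.
	std::vector<std::shared_ptr<thread_t>> threads;

	void add_thread()
	{
		threads.emplace_back(std::make_shared<thread_t>(this));
	}
};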

@ -88,52 +88,12 @@ s32 sys_spu_image_open(vm::ptr<sys_spu_image_t> img, vm::cptr<char> path)
return CELL_OK;
}
u32 spu_thread_initialize(u32 group_id, u32 spu_num, vm::ptr<sys_spu_image_t> img, const std::string& name, u32 option, u64 a1, u64 a2, u64 a3, u64 a4, std::function<void(SPUThread&)> task = nullptr)
{
if (option)
{
sys_spu.error("Unsupported SPU Thread options (0x%x)", option);
}
const auto spu = idm::make_ptr<SPUThread>(name, spu_num);
spu->custom_task = task;
const auto group = idm::get<lv2_spu_group>(group_id);
spu->tg = group;
group->threads[spu_num] = spu;
group->args[spu_num] = { a1, a2, a3, a4 };
group->images[spu_num] = img;
u32 count = 0;
for (auto& t : group->threads)
{
if (t)
{
count++;
}
}
if (count > group->num)
{
fmt::throw_exception("Unexpected thread count (%d)" HERE, count);
}
if (count == group->num)
{
group->state = SPU_THREAD_GROUP_STATUS_INITIALIZED;
}
return spu->id;
}
s32 sys_spu_thread_initialize(vm::ptr<u32> thread, u32 group_id, u32 spu_num, vm::ptr<sys_spu_image_t> img, vm::ptr<sys_spu_thread_attribute> attr, vm::ptr<sys_spu_thread_argument> arg)
{
sys_spu.warning("sys_spu_thread_initialize(thread=*0x%x, group=0x%x, spu_num=%d, img=*0x%x, attr=*0x%x, arg=*0x%x)", thread, group_id, spu_num, img, attr, arg);
LV2_LOCK;
// Read thread name
const std::string thread_name(attr->name.get_ptr(), attr->name ? attr->name_len - 1 : 0);
const auto group = idm::get<lv2_spu_group>(group_id);
@ -142,17 +102,36 @@ s32 sys_spu_thread_initialize(vm::ptr<u32> thread, u32 group_id, u32 spu_num, vm
return CELL_ESRCH;
}
semaphore_lock lock(group->mutex);
if (spu_num >= group->threads.size())
{
return CELL_EINVAL;
}
if (group->threads[spu_num] || group->state != SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED)
if (group->threads[spu_num] || group->run_state != SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED)
{
return CELL_EBUSY;
}
*thread = spu_thread_initialize(group_id, spu_num, img, attr->name ? std::string(attr->name.get_ptr(), attr->name_len - 1) : "", attr->option, arg->arg1, arg->arg2, arg->arg3, arg->arg4);
if (u32 option = attr->option)
{
sys_spu.todo("Unimplemented SPU Thread options (0x%x)", option);
}
auto spu = idm::make_ptr<SPUThread>(thread_name, spu_num, group.get());
*thread = spu->id;
group->threads[spu_num] = std::move(spu);
group->args[spu_num] = {arg->arg1, arg->arg2, arg->arg3, arg->arg4};
group->images[spu_num] = img;
if (++group->init == group->num)
{
group->run_state = SPU_THREAD_GROUP_STATUS_INITIALIZED;
}
return CELL_OK;
}
@ -160,8 +139,6 @@ s32 sys_spu_thread_set_argument(u32 id, vm::ptr<sys_spu_thread_argument> arg)
{
sys_spu.warning("sys_spu_thread_set_argument(id=0x%x, arg=*0x%x)", id, arg);
LV2_LOCK;
const auto thread = idm::get<SPUThread>(id);
if (!thread)
@ -169,17 +146,9 @@ s32 sys_spu_thread_set_argument(u32 id, vm::ptr<sys_spu_thread_argument> arg)
return CELL_ESRCH;
}
const auto group = thread->tg.lock();
const auto group = thread->group;
if (!group)
{
fmt::throw_exception("Invalid SPU thread group" HERE);
}
if (thread->index >= group->threads.size() || group->threads[thread->index] != thread)
{
fmt::throw_exception("Unexpected SPU thread index (%d)" HERE, thread->index);
}
semaphore_lock lock(group->mutex);
group->args[thread->index].arg1 = arg->arg1;
group->args[thread->index].arg2 = arg->arg2;
@ -193,8 +162,6 @@ s32 sys_spu_thread_get_exit_status(u32 id, vm::ptr<u32> status)
{
sys_spu.warning("sys_spu_thread_get_exit_status(id=0x%x, status=*0x%x)", id, status);
LV2_LOCK;
const auto thread = idm::get<SPUThread>(id);
if (!thread)
@ -234,34 +201,37 @@ s32 sys_spu_thread_group_destroy(u32 id)
{
sys_spu.warning("sys_spu_thread_group_destroy(id=0x%x)", id);
LV2_LOCK;
const auto group = idm::withdraw<lv2_spu_group>(id, [](lv2_spu_group& group) -> CellError
{
const auto _old = group.run_state.compare_and_swap(SPU_THREAD_GROUP_STATUS_INITIALIZED, SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED);
const auto group = idm::get<lv2_spu_group>(id);
if (_old > SPU_THREAD_GROUP_STATUS_INITIALIZED)
{
return CELL_EBUSY;
}
return {};
});
if (!group)
{
return CELL_ESRCH;
}
if (group->state > SPU_THREAD_GROUP_STATUS_INITIALIZED)
if (group.ret)
{
return CELL_EBUSY;
return group.ret;
}
// clear threads
for (auto& t : group->threads)
// Cleanup
for (auto& ptr : group->threads)
{
if (t)
if (auto thread = std::move(ptr))
{
idm::remove<SPUThread>(t->id);
t.reset();
idm::remove<SPUThread>(thread->id);
}
}
group->state = SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED; // hack
idm::remove<lv2_spu_group>(id);
return CELL_OK;
}
@ -269,55 +239,51 @@ s32 sys_spu_thread_group_start(u32 id)
{
sys_spu.warning("sys_spu_thread_group_start(id=0x%x)", id);
LV2_LOCK;
const auto group = idm::get<lv2_spu_group>(id);
const auto group = idm::get<lv2_spu_group>(id, [](lv2_spu_group& group)
{
// SPU_THREAD_GROUP_STATUS_READY state is not used
return group.run_state.compare_and_swap_test(SPU_THREAD_GROUP_STATUS_INITIALIZED, SPU_THREAD_GROUP_STATUS_RUNNING);
});
if (!group)
{
return CELL_ESRCH;
}
if (group->state != SPU_THREAD_GROUP_STATUS_INITIALIZED)
if (!group.ret)
{
return CELL_ESTAT;
}
// SPU_THREAD_GROUP_STATUS_READY state is not used
semaphore_lock lock(group->mutex);
group->state = SPU_THREAD_GROUP_STATUS_RUNNING;
group->join_state = 0;
for (auto& t : group->threads)
for (auto& thread : group->threads)
{
if (t)
if (thread)
{
if (t->index >= group->threads.size())
{
fmt::throw_exception("Unexpected SPU thread index (%d)" HERE, t->index);
}
auto& args = group->args[t->index];
auto& image = group->images[t->index];
auto& args = group->args[thread->index];
auto& image = group->images[thread->index];
// Copy SPU image:
// TODO: use segment info
std::memcpy(vm::base(t->offset), image->segs.get_ptr(), 256 * 1024);
std::memcpy(vm::base(thread->offset), image->segs.get_ptr(), 256 * 1024);
t->pc = image->entry_point;
t->cpu_init();
t->gpr[3] = v128::from64(0, args.arg1);
t->gpr[4] = v128::from64(0, args.arg2);
t->gpr[5] = v128::from64(0, args.arg3);
t->gpr[6] = v128::from64(0, args.arg4);
thread->pc = image->entry_point;
thread->cpu_init();
thread->gpr[3] = v128::from64(0, args.arg1);
thread->gpr[4] = v128::from64(0, args.arg2);
thread->gpr[5] = v128::from64(0, args.arg3);
thread->gpr[6] = v128::from64(0, args.arg4);
t->status.exchange(SPU_STATUS_RUNNING);
thread->status.exchange(SPU_STATUS_RUNNING);
}
}
// because SPU_THREAD_GROUP_STATUS_READY is not possible, run event is delivered immediately
group->send_run_event(id, 0, 0); // TODO: check data2 and data3
// Because SPU_THREAD_GROUP_STATUS_READY is not possible, run event is delivered immediately
// TODO: check data2 and data3
group->send_run_event(id, 0, 0);
for (auto& thread : group->threads)
{
@ -334,8 +300,6 @@ s32 sys_spu_thread_group_suspend(u32 id)
{
sys_spu.trace("sys_spu_thread_group_suspend(id=0x%x)", id);
LV2_LOCK;
const auto group = idm::get<lv2_spu_group>(id);
if (!group)
@ -348,24 +312,26 @@ s32 sys_spu_thread_group_suspend(u32 id)
return CELL_EINVAL;
}
if (group->state <= SPU_THREAD_GROUP_STATUS_INITIALIZED || group->state == SPU_THREAD_GROUP_STATUS_STOPPED)
semaphore_lock lock(group->mutex);
if (group->run_state <= SPU_THREAD_GROUP_STATUS_INITIALIZED || group->run_state == SPU_THREAD_GROUP_STATUS_STOPPED)
{
return CELL_ESTAT;
}
// SPU_THREAD_GROUP_STATUS_READY state is not used
if (group->state == SPU_THREAD_GROUP_STATUS_RUNNING)
if (group->run_state == SPU_THREAD_GROUP_STATUS_RUNNING)
{
group->state = SPU_THREAD_GROUP_STATUS_SUSPENDED;
group->run_state = SPU_THREAD_GROUP_STATUS_SUSPENDED;
}
else if (group->state == SPU_THREAD_GROUP_STATUS_WAITING)
else if (group->run_state == SPU_THREAD_GROUP_STATUS_WAITING)
{
group->state = SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED;
group->run_state = SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED;
}
else if (group->state == SPU_THREAD_GROUP_STATUS_SUSPENDED || group->state == SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED)
else if (group->run_state == SPU_THREAD_GROUP_STATUS_SUSPENDED || group->run_state == SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED)
{
return CELL_OK; // probably, nothing to do there
return CELL_OK;
}
else
{
@ -387,8 +353,6 @@ s32 sys_spu_thread_group_resume(u32 id)
{
sys_spu.trace("sys_spu_thread_group_resume(id=0x%x)", id);
LV2_LOCK;
const auto group = idm::get<lv2_spu_group>(id);
if (!group)
@ -401,15 +365,17 @@ s32 sys_spu_thread_group_resume(u32 id)
return CELL_EINVAL;
}
semaphore_lock lock(group->mutex);
// SPU_THREAD_GROUP_STATUS_READY state is not used
if (group->state == SPU_THREAD_GROUP_STATUS_SUSPENDED)
if (group->run_state == SPU_THREAD_GROUP_STATUS_SUSPENDED)
{
group->state = SPU_THREAD_GROUP_STATUS_RUNNING;
group->run_state = SPU_THREAD_GROUP_STATUS_RUNNING;
}
else if (group->state == SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED)
else if (group->run_state == SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED)
{
group->state = SPU_THREAD_GROUP_STATUS_WAITING;
group->run_state = SPU_THREAD_GROUP_STATUS_WAITING;
}
else
{
@ -425,8 +391,6 @@ s32 sys_spu_thread_group_resume(u32 id)
}
}
group->cv.notify_all();
return CELL_OK;
}
@ -434,8 +398,6 @@ s32 sys_spu_thread_group_yield(u32 id)
{
sys_spu.trace("sys_spu_thread_group_yield(id=0x%x)", id);
LV2_LOCK;
const auto group = idm::get<lv2_spu_group>(id);
if (!group)
@ -448,7 +410,7 @@ s32 sys_spu_thread_group_yield(u32 id)
return CELL_OK;
}
if (group->state != SPU_THREAD_GROUP_STATUS_RUNNING)
if (group->run_state != SPU_THREAD_GROUP_STATUS_RUNNING)
{
return CELL_ESTAT;
}
@ -462,11 +424,10 @@ s32 sys_spu_thread_group_terminate(u32 id, s32 value)
{
sys_spu.warning("sys_spu_thread_group_terminate(id=0x%x, value=0x%x)", id, value);
LV2_LOCK;
// seems the id can be either SPU Thread Group or SPU Thread
// The id can be either SPU Thread Group or SPU Thread
const auto thread = idm::get<SPUThread>(id);
const auto group = thread ? thread->tg.lock() : idm::get<lv2_spu_group>(id);
const auto _group = idm::get<lv2_spu_group>(id);
const auto group = thread ? thread->group : _group.get();
if (!group && !thread)
{
@ -492,9 +453,11 @@ s32 sys_spu_thread_group_terminate(u32 id, s32 value)
}
}
if (group->state <= SPU_THREAD_GROUP_STATUS_INITIALIZED ||
group->state == SPU_THREAD_GROUP_STATUS_WAITING ||
group->state == SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED)
semaphore_lock lock(group->mutex);
if (group->run_state <= SPU_THREAD_GROUP_STATUS_INITIALIZED ||
group->run_state == SPU_THREAD_GROUP_STATUS_WAITING ||
group->run_state == SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED)
{
return CELL_ESTAT;
}
@ -508,7 +471,7 @@ s32 sys_spu_thread_group_terminate(u32 id, s32 value)
}
}
group->state = SPU_THREAD_GROUP_STATUS_INITIALIZED;
group->run_state = SPU_THREAD_GROUP_STATUS_INITIALIZED;
group->exit_status = value;
group->join_state |= SPU_TGJSF_TERMINATED;
group->cv.notify_one();
@ -520,8 +483,6 @@ s32 sys_spu_thread_group_join(u32 id, vm::ptr<u32> cause, vm::ptr<u32> status)
{
sys_spu.warning("sys_spu_thread_group_join(id=0x%x, cause=*0x%x, status=*0x%x)", id, cause, status);
LV2_LOCK;
const auto group = idm::get<lv2_spu_group>(id);
if (!group)
@ -529,7 +490,9 @@ s32 sys_spu_thread_group_join(u32 id, vm::ptr<u32> cause, vm::ptr<u32> status)
return CELL_ESRCH;
}
if (group->state < SPU_THREAD_GROUP_STATUS_INITIALIZED)
semaphore_lock lock(group->mutex);
if (group->run_state < SPU_THREAD_GROUP_STATUS_INITIALIZED)
{
return CELL_ESTAT;
}
@ -563,7 +526,7 @@ s32 sys_spu_thread_group_join(u32 id, vm::ptr<u32> cause, vm::ptr<u32> status)
CHECK_EMU_STATUS;
group->cv.wait(lv2_lock, 1000);
group->cv.wait(lock, 1000);
}
switch (group->join_state & ~SPU_TGJSF_IS_JOINING)
@ -595,7 +558,7 @@ s32 sys_spu_thread_group_join(u32 id, vm::ptr<u32> cause, vm::ptr<u32> status)
}
group->join_state &= ~SPU_TGJSF_IS_JOINING;
group->state = SPU_THREAD_GROUP_STATUS_INITIALIZED; // hack
group->run_state = SPU_THREAD_GROUP_STATUS_INITIALIZED; // hack
return CELL_OK;
}
@ -603,8 +566,6 @@ s32 sys_spu_thread_write_ls(u32 id, u32 lsa, u64 value, u32 type)
{
sys_spu.trace("sys_spu_thread_write_ls(id=0x%x, lsa=0x%05x, value=0x%llx, type=%d)", id, lsa, value, type);
LV2_LOCK;
const auto thread = idm::get<SPUThread>(id);
if (!thread)
@ -617,14 +578,11 @@ s32 sys_spu_thread_write_ls(u32 id, u32 lsa, u64 value, u32 type)
return CELL_EINVAL;
}
const auto group = thread->tg.lock();
const auto group = thread->group;
if (!group)
{
fmt::throw_exception("Invalid SPU thread group" HERE);
}
semaphore_lock lock(group->mutex);
if (group->state < SPU_THREAD_GROUP_STATUS_WAITING || group->state > SPU_THREAD_GROUP_STATUS_RUNNING)
if (group->run_state < SPU_THREAD_GROUP_STATUS_WAITING || group->run_state > SPU_THREAD_GROUP_STATUS_RUNNING)
{
return CELL_ESTAT;
}
@ -645,8 +603,6 @@ s32 sys_spu_thread_read_ls(u32 id, u32 lsa, vm::ptr<u64> value, u32 type)
{
sys_spu.trace("sys_spu_thread_read_ls(id=0x%x, lsa=0x%05x, value=*0x%x, type=%d)", id, lsa, value, type);
LV2_LOCK;
const auto thread = idm::get<SPUThread>(id);
if (!thread)
@ -659,14 +615,11 @@ s32 sys_spu_thread_read_ls(u32 id, u32 lsa, vm::ptr<u64> value, u32 type)
return CELL_EINVAL;
}
const auto group = thread->tg.lock();
const auto group = thread->group;
if (!group)
{
fmt::throw_exception("Invalid SPU thread group" HERE);
}
semaphore_lock lock(group->mutex);
if (group->state < SPU_THREAD_GROUP_STATUS_WAITING || group->state > SPU_THREAD_GROUP_STATUS_RUNNING)
if (group->run_state < SPU_THREAD_GROUP_STATUS_WAITING || group->run_state > SPU_THREAD_GROUP_STATUS_RUNNING)
{
return CELL_ESTAT;
}
@ -687,8 +640,6 @@ s32 sys_spu_thread_write_spu_mb(u32 id, u32 value)
{
sys_spu.warning("sys_spu_thread_write_spu_mb(id=0x%x, value=0x%x)", id, value);
LV2_LOCK;
const auto thread = idm::get<SPUThread>(id);
if (!thread)
@ -696,14 +647,11 @@ s32 sys_spu_thread_write_spu_mb(u32 id, u32 value)
return CELL_ESRCH;
}
const auto group = thread->tg.lock();
const auto group = thread->group;
if (!group)
{
fmt::throw_exception("Invalid SPU thread group" HERE);
}
semaphore_lock lock(group->mutex);
if (group->state < SPU_THREAD_GROUP_STATUS_WAITING || group->state > SPU_THREAD_GROUP_STATUS_RUNNING)
if (group->run_state < SPU_THREAD_GROUP_STATUS_WAITING || group->run_state > SPU_THREAD_GROUP_STATUS_RUNNING)
{
return CELL_ESTAT;
}
@ -717,8 +665,6 @@ s32 sys_spu_thread_set_spu_cfg(u32 id, u64 value)
{
sys_spu.warning("sys_spu_thread_set_spu_cfg(id=0x%x, value=0x%x)", id, value);
LV2_LOCK;
const auto thread = idm::get<SPUThread>(id);
if (!thread)
@ -740,8 +686,6 @@ s32 sys_spu_thread_get_spu_cfg(u32 id, vm::ptr<u64> value)
{
sys_spu.warning("sys_spu_thread_get_spu_cfg(id=0x%x, value=*0x%x)", id, value);
LV2_LOCK;
const auto thread = idm::get<SPUThread>(id);
if (!thread)
@ -758,8 +702,6 @@ s32 sys_spu_thread_write_snr(u32 id, u32 number, u32 value)
{
sys_spu.trace("sys_spu_thread_write_snr(id=0x%x, number=%d, value=0x%x)", id, number, value);
LV2_LOCK;
const auto thread = idm::get<SPUThread>(id);
if (!thread)
@ -772,13 +714,6 @@ s32 sys_spu_thread_write_snr(u32 id, u32 number, u32 value)
return CELL_EINVAL;
}
const auto group = thread->tg.lock();
if (!group)
{
fmt::throw_exception("Invalid SPU thread group" HERE);
}
//if (group->state < SPU_THREAD_GROUP_STATUS_WAITING || group->state > SPU_THREAD_GROUP_STATUS_RUNNING) // ???
//{
// return CELL_ESTAT;
@ -793,8 +728,6 @@ s32 sys_spu_thread_group_connect_event(u32 id, u32 eq, u32 et)
{
sys_spu.warning("sys_spu_thread_group_connect_event(id=0x%x, eq=0x%x, et=%d)", id, eq, et);
LV2_LOCK;
const auto group = idm::get<lv2_spu_group>(id);
const auto queue = idm::get<lv2_obj, lv2_event_queue>(eq);
@ -803,6 +736,8 @@ s32 sys_spu_thread_group_connect_event(u32 id, u32 eq, u32 et)
return CELL_ESRCH;
}
semaphore_lock lock(group->mutex);
switch (et)
{
case SYS_SPU_THREAD_GROUP_EVENT_RUN:
@ -849,8 +784,6 @@ s32 sys_spu_thread_group_disconnect_event(u32 id, u32 et)
{
sys_spu.warning("sys_spu_thread_group_disconnect_event(id=0x%x, et=%d)", id, et);
LV2_LOCK;
const auto group = idm::get<lv2_spu_group>(id);
if (!group)
@ -858,6 +791,8 @@ s32 sys_spu_thread_group_disconnect_event(u32 id, u32 et)
return CELL_ESRCH;
}
semaphore_lock lock(group->mutex);
switch (et)
{
case SYS_SPU_THREAD_GROUP_EVENT_RUN:
@ -904,8 +839,6 @@ s32 sys_spu_thread_connect_event(u32 id, u32 eq, u32 et, u8 spup)
{
sys_spu.warning("sys_spu_thread_connect_event(id=0x%x, eq=0x%x, et=%d, spup=%d)", id, eq, et, spup);
LV2_LOCK;
const auto thread = idm::get<SPUThread>(id);
const auto queue = idm::get<lv2_obj, lv2_event_queue>(eq);
@ -920,6 +853,8 @@ s32 sys_spu_thread_connect_event(u32 id, u32 eq, u32 et, u8 spup)
return CELL_EINVAL;
}
semaphore_lock lock(thread->group->mutex);
auto& port = thread->spup[spup];
if (!port.expired())
@ -936,8 +871,6 @@ s32 sys_spu_thread_disconnect_event(u32 id, u32 et, u8 spup)
{
sys_spu.warning("sys_spu_thread_disconnect_event(id=0x%x, et=%d, spup=%d)", id, et, spup);
LV2_LOCK;
const auto thread = idm::get<SPUThread>(id);
if (!thread)
@ -951,6 +884,8 @@ s32 sys_spu_thread_disconnect_event(u32 id, u32 et, u8 spup)
return CELL_EINVAL;
}
semaphore_lock lock(thread->group->mutex);
auto& port = thread->spup[spup];
if (port.expired())
@ -967,8 +902,6 @@ s32 sys_spu_thread_bind_queue(u32 id, u32 spuq, u32 spuq_num)
{
sys_spu.warning("sys_spu_thread_bind_queue(id=0x%x, spuq=0x%x, spuq_num=0x%x)", id, spuq, spuq_num);
LV2_LOCK;
const auto thread = idm::get<SPUThread>(id);
const auto queue = idm::get<lv2_obj, lv2_event_queue>(spuq);
@ -982,6 +915,8 @@ s32 sys_spu_thread_bind_queue(u32 id, u32 spuq, u32 spuq_num)
return CELL_EINVAL;
}
semaphore_lock lock(thread->group->mutex);
for (auto& v : thread->spuq)
{
if (auto q = v.second.lock())
@ -1011,8 +946,6 @@ s32 sys_spu_thread_unbind_queue(u32 id, u32 spuq_num)
{
sys_spu.warning("sys_spu_thread_unbind_queue(id=0x%x, spuq_num=0x%x)", id, spuq_num);
LV2_LOCK;
const auto thread = idm::get<SPUThread>(id);
if (!thread)
@ -1020,6 +953,8 @@ s32 sys_spu_thread_unbind_queue(u32 id, u32 spuq_num)
return CELL_ESRCH;
}
semaphore_lock lock(thread->group->mutex);
for (auto& v : thread->spuq)
{
if (v.first == spuq_num && !v.second.expired())
@ -1037,8 +972,6 @@ s32 sys_spu_thread_group_connect_event_all_threads(u32 id, u32 eq, u64 req, vm::
{
sys_spu.warning("sys_spu_thread_group_connect_event_all_threads(id=0x%x, eq=0x%x, req=0x%llx, spup=*0x%x)", id, eq, req, spup);
LV2_LOCK;
const auto group = idm::get<lv2_spu_group>(id);
const auto queue = idm::get<lv2_obj, lv2_event_queue>(eq);
@ -1052,7 +985,9 @@ s32 sys_spu_thread_group_connect_event_all_threads(u32 id, u32 eq, u64 req, vm::
return CELL_EINVAL;
}
if (group->state < SPU_THREAD_GROUP_STATUS_INITIALIZED)
semaphore_lock lock(group->mutex);
if (group->run_state < SPU_THREAD_GROUP_STATUS_INITIALIZED)
{
return CELL_ESTAT;
}
@ -1108,8 +1043,6 @@ s32 sys_spu_thread_group_disconnect_event_all_threads(u32 id, u8 spup)
{
sys_spu.warning("sys_spu_thread_group_disconnect_event_all_threads(id=0x%x, spup=%d)", id, spup);
LV2_LOCK;
const auto group = idm::get<lv2_spu_group>(id);
if (!group)
@ -1122,6 +1055,8 @@ s32 sys_spu_thread_group_disconnect_event_all_threads(u32 id, u8 spup)
return CELL_EINVAL;
}
semaphore_lock lock(group->mutex);
for (auto& t : group->threads)
{
if (t)
@ -1137,8 +1072,6 @@ s32 sys_raw_spu_create(vm::ptr<u32> id, vm::ptr<void> attr)
{
sys_spu.warning("sys_raw_spu_create(id=*0x%x, attr=*0x%x)", id, attr);
LV2_LOCK;
// TODO: check number set by sys_spu_initialize()
const auto thread = idm::make_ptr<RawSPUThread>("");
@ -1159,8 +1092,6 @@ s32 sys_raw_spu_destroy(ppu_thread& ppu, u32 id)
{
sys_spu.warning("sys_raw_spu_destroy(id=%d)", id);
LV2_LOCK;
const auto thread = idm::get<RawSPUThread>(id);
if (!thread)
@ -1183,7 +1114,8 @@ s32 sys_raw_spu_destroy(ppu_thread& ppu, u32 id)
{
if (auto handler = intr.tag->handler.lock())
{
LV2_UNLOCK, handler->join();
// SLEEP
handler->join();
to_remove.emplace(handler.get(), 0);
}
@ -1220,32 +1152,45 @@ s32 sys_raw_spu_create_interrupt_tag(u32 id, u32 class_id, u32 hwthread, vm::ptr
{
sys_spu.warning("sys_raw_spu_create_interrupt_tag(id=%d, class_id=%d, hwthread=0x%x, intrtag=*0x%x)", id, class_id, hwthread, intrtag);
LV2_LOCK;
const auto thread = idm::get<RawSPUThread>(id);
if (!thread)
{
return CELL_ESRCH;
}
if (class_id != 0 && class_id != 2)
{
return CELL_EINVAL;
}
auto& int_ctrl = thread->int_ctrl[class_id];
CellError error = {};
if (int_ctrl.tag)
const auto tag = idm::import<lv2_obj, lv2_int_tag>([&]()
{
return CELL_EAGAIN;
std::shared_ptr<lv2_int_tag> result;
auto thread = idm::check_unlocked<RawSPUThread>(id);
if (!thread)
{
error = CELL_ESRCH;
return result;
}
auto& int_ctrl = thread->int_ctrl[class_id];
if (int_ctrl.tag)
{
error = CELL_EAGAIN;
return result;
}
result = std::make_shared<lv2_int_tag>();
int_ctrl.tag = result;
return result;
});
if (tag)
{
*intrtag = tag;
return CELL_OK;
}
int_ctrl.tag = idm::make_ptr<lv2_obj, lv2_int_tag>();
*intrtag = idm::last_id();
return CELL_OK;
return error;
}
s32 sys_raw_spu_set_int_mask(u32 id, u32 class_id, u64 mask)

@ -148,17 +148,18 @@ struct lv2_spu_group
const s32 type; // SPU Thread Group Type
const u32 ct; // Memory Container Id
semaphore<> mutex;
atomic_t<u32> init; // Initialization Counter
atomic_t<s32> prio; // SPU Thread Group Priority
atomic_t<u32> run_state; // SPU Thread Group State
atomic_t<s32> exit_status; // SPU Thread Group Exit Status
atomic_t<u32> join_state; // flags used to detect exit cause
cond_variable cv; // used to signal waiting PPU thread
std::array<std::shared_ptr<SPUThread>, 256> threads; // SPU Threads
std::array<vm::ps3::ptr<sys_spu_image_t>, 256> images; // SPU Images
std::array<spu_arg_t, 256> args; // SPU Thread Arguments
s32 prio; // SPU Thread Group Priority
volatile u32 state; // SPU Thread Group State
s32 exit_status; // SPU Thread Group Exit Status
atomic_t<u32> join_state; // flags used to detect exit cause
cond_variable cv; // used to signal waiting PPU thread
std::weak_ptr<lv2_event_queue> ep_run; // port for SYS_SPU_THREAD_GROUP_EVENT_RUN events
std::weak_ptr<lv2_event_queue> ep_exception; // TODO: SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION
std::weak_ptr<lv2_event_queue> ep_sysmodule; // TODO: SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE
@ -166,10 +167,11 @@ struct lv2_spu_group
lv2_spu_group(std::string name, u32 num, s32 prio, s32 type, u32 ct)
: name(name)
, num(num)
, init(0)
, prio(prio)
, type(type)
, ct(ct)
, state(SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED)
, run_state(SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED)
, exit_status(0)
, join_state(0)
{
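
With run_state stored as atomic_t<u32>, status transitions such as the INITIALIZED to RUNNING step in sys_spu_thread_group_start become single compare-and-swap operations instead of checks made under the global LV2_LOCK. A rough stand-alone equivalent with std::atomic (the numeric status values below are placeholders for the SPU_THREAD_GROUP_STATUS_* constants, and compare_exchange_strong stands in for atomic_t's compare_and_swap_test):

#include <atomic>
#include <cstdint>

// Placeholder status values; only their distinctness matters for this sketch.
constexpr std::uint32_t status_initialized = 1;
constexpr std::uint32_t status_running = 6;

struct group_state_sketch
{
	std::atomic<std::uint32_t> run_state{0};

	// Succeeds only if the group is exactly INITIALIZED at the moment of the call,
	// so two concurrent start requests cannot both win.
	bool try_start()
	{
		std::uint32_t expected = status_initialized;
		return run_state.compare_exchange_strong(expected, status_running);
	}
};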