PPU thread scheduler

This commit is contained in:
Nekotekina 2017-02-06 21:36:46 +03:00
parent e4962054a4
commit 598c90f376
41 changed files with 699 additions and 259 deletions

View file

@ -89,6 +89,8 @@ cpu_thread::cpu_thread(u32 id)
bool cpu_thread::check_state()
{
bool cpu_sleep_called = false;
while (true)
{
if (test(state & cpu_flag::exit))
@ -96,11 +98,23 @@ bool cpu_thread::check_state()
return true;
}
if (test(state & cpu_flag::signal) && state.test_and_reset(cpu_flag::signal))
{
cpu_sleep_called = false;
}
if (!test(state & (cpu_state_pause + cpu_flag::dbg_global_stop)))
{
break;
}
if (test(state & cpu_flag::suspend) && !cpu_sleep_called)
{
cpu_sleep();
cpu_sleep_called = true;
continue;
}
thread_ctrl::wait();
}
@ -126,12 +140,6 @@ void cpu_thread::run()
notify();
}
// Raise cpu_flag::signal on this thread and wake it.
// NOTE(review): `verify("...") HERE, expr` appears to be the project's assertion
// idiom (the comma-operator checks `expr`) — here it asserts that the signal flag
// was NOT already set, i.e. a signal must not be delivered twice before being
// consumed. Confirm against the verify() macro definition.
void cpu_thread::set_signal()
{
	verify("cpu_flag::signal" HERE), !state.test_and_set(cpu_flag::signal);
	// Wake the thread so it observes the new flag (presumably via thread_ctrl::wait)
	notify();
}
std::string cpu_thread::dump() const
{
return fmt::format("Type: %s\n" "State: %s\n", typeid(*this).name(), state.load());

View file

@ -8,7 +8,7 @@ enum class cpu_flag : u32
{
stop, // Thread not running (HLE, initial state)
exit, // Irreversible exit
suspend, // Thread paused
suspend, // Thread suspended
ret, // Callback return requested
signal, // Thread received a signal (HLE)
@ -38,18 +38,12 @@ public:
// Public thread state
atomic_t<bs_t<cpu_flag>> state{+cpu_flag::stop};
// Object associated with sleep state, possibly synchronization primitive (mutex, semaphore, etc.)
atomic_t<void*> owner{};
// Process thread state, return true if the checker must return
bool check_state();
// Run thread
void run();
// Set cpu_flag::signal
void set_signal();
// Check thread type
u32 id_type()
{
@ -64,6 +58,9 @@ public:
// Thread entry point function
virtual void cpu_task() = 0;
// Callback for cpu_flag::suspend
virtual void cpu_sleep() {}
};
inline cpu_thread* get_current_cpu_thread() noexcept

View file

@ -2,6 +2,7 @@
#include "Emu/System.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/lv2/sys_sync.h"
extern "C"
{
@ -199,6 +200,7 @@ public:
// TODO: finalize
cellAdec.warning("adecEndSeq:");
cbFunc(*this, id, CELL_ADEC_MSG_TYPE_SEQDONE, CELL_OK, cbArg);
lv2_obj::sleep(*this, -1);
just_finished = true;
break;
@ -375,11 +377,13 @@ public:
{
frame.data = nullptr; // to prevent destruction
cbFunc(*this, id, CELL_ADEC_MSG_TYPE_PCMOUT, CELL_OK, cbArg);
lv2_obj::sleep(*this, -1);
}
}
}
cbFunc(*this, id, CELL_ADEC_MSG_TYPE_AUDONE, task.au.auInfo_addr, cbArg);
lv2_obj::sleep(*this, -1);
break;
}

View file

@ -2,6 +2,7 @@
#include "Emu/System.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/lv2/sys_sync.h"
#include "cellPamf.h"
#include "cellDmux.h"
@ -242,6 +243,7 @@ public:
dmuxMsg->msgType = CELL_DMUX_MSG_TYPE_DEMUX_DONE;
dmuxMsg->supplementalInfo = stream.userdata;
cbFunc(*this, id, dmuxMsg, cbArg);
lv2_obj::sleep(*this, -1);
is_working = false;
@ -395,6 +397,7 @@ public:
esMsg->msgType = CELL_DMUX_ES_MSG_TYPE_AU_FOUND;
esMsg->supplementalInfo = stream.userdata;
es.cbFunc(*this, id, es.id, esMsg, es.cbArg);
lv2_obj::sleep(*this, -1);
}
}
else
@ -460,6 +463,7 @@ public:
esMsg->msgType = CELL_DMUX_ES_MSG_TYPE_AU_FOUND;
esMsg->supplementalInfo = stream.userdata;
es.cbFunc(*this, id, es.id, esMsg, es.cbArg);
lv2_obj::sleep(*this, -1);
}
if (pes.has_ts)
@ -536,6 +540,7 @@ public:
dmuxMsg->msgType = CELL_DMUX_MSG_TYPE_DEMUX_DONE;
dmuxMsg->supplementalInfo = stream.userdata;
cbFunc(*this, id, dmuxMsg, cbArg);
lv2_obj::sleep(*this, -1);
stream = {};
@ -624,6 +629,7 @@ public:
esMsg->msgType = CELL_DMUX_ES_MSG_TYPE_AU_FOUND;
esMsg->supplementalInfo = stream.userdata;
es.cbFunc(*this, id, es.id, esMsg, es.cbArg);
lv2_obj::sleep(*this, -1);
}
if (es.raw_data.size())
@ -636,6 +642,7 @@ public:
esMsg->msgType = CELL_DMUX_ES_MSG_TYPE_FLUSH_DONE;
esMsg->supplementalInfo = stream.userdata;
es.cbFunc(*this, id, es.id, esMsg, es.cbArg);
lv2_obj::sleep(*this, -1);
break;
}

View file

@ -4,6 +4,7 @@
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/lv2/sys_fs.h"
#include "Emu/Cell/lv2/sys_sync.h"
#include "cellFs.h"
#include "Utilities/StrUtil.h"
@ -742,6 +743,7 @@ struct fs_aio_thread : ppu_thread
}
func(*this, aio, error, xid, result);
lv2_obj::sleep(*this, -1);
}
}
};

View file

@ -84,7 +84,7 @@ namespace _spurs
s32 add_default_syswkl(vm::ptr<CellSpurs> spurs, vm::cptr<u8> swlPriority, u32 swlMaxSpu, u32 swlIsPreem);
// Destroy the SPURS SPU threads and thread group
s32 finalize_spu(vm::ptr<CellSpurs> spurs);
s32 finalize_spu(ppu_thread&, vm::ptr<CellSpurs> spurs);
// Stop the event helper thread
s32 stop_event_helper(ppu_thread& ppu, vm::ptr<CellSpurs> spurs);
@ -276,7 +276,7 @@ namespace _spurs
//s32 cellSpursEventFlagWait(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag, vm::ptr<u16> mask, u32 mode);
//s32 cellSpursEventFlagTryWait(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag, vm::ptr<u16> mask, u32 mode);
//s32 cellSpursEventFlagAttachLv2EventQueue(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag);
//s32 cellSpursEventFlagDetachLv2EventQueue(vm::ptr<CellSpursEventFlag> eventFlag);
//s32 cellSpursEventFlagDetachLv2EventQueue(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag);
//s32 cellSpursEventFlagGetDirection(vm::ptr<CellSpursEventFlag> eventFlag, vm::ptr<u32> direction);
//s32 cellSpursEventFlagGetClearMode(vm::ptr<CellSpursEventFlag> eventFlag, vm::ptr<u32> clear_mode);
//s32 cellSpursEventFlagGetTasksetAddress(vm::ptr<CellSpursEventFlag> eventFlag, vm::pptr<CellSpursTaskset> taskset);
@ -380,7 +380,7 @@ s32 _spurs::create_lv2_eq(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<u32
if (s32 rc = _spurs::attach_lv2_eq(ppu, spurs, *queueId, port, 1, true))
{
sys_event_queue_destroy(*queueId, SYS_EVENT_QUEUE_DESTROY_FORCE);
sys_event_queue_destroy(ppu, *queueId, SYS_EVENT_QUEUE_DESTROY_FORCE);
}
return CELL_OK;
@ -574,7 +574,7 @@ void _spurs::handler_entry(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
CHECK_SUCCESS(sys_spu_thread_group_start(spurs->spuTG));
if (s32 rc = sys_spu_thread_group_join(spurs->spuTG, vm::null, vm::null))
if (s32 rc = sys_spu_thread_group_join(ppu, spurs->spuTG, vm::null, vm::null))
{
if (rc == CELL_ESTAT)
{
@ -674,7 +674,7 @@ s32 _spurs::wakeup_shutdown_completion_waiter(ppu_thread& ppu, vm::ptr<CellSpurs
if (!wklF->hook || wklEvent->load() & 0x10)
{
verify(HERE), (wklF->x28 == 2);
rc = sys_semaphore_post((u32)wklF->sem, 1);
rc = sys_semaphore_post(ppu, (u32)wklF->sem, 1);
}
return rc;
@ -712,11 +712,11 @@ void _spurs::event_helper_entry(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
for (u32 i = 0; i < CELL_SPURS_MAX_WORKLOAD; i++)
{
sys_semaphore_post((u32)spurs->wklF1[i].sem, 1);
sys_semaphore_post(ppu, (u32)spurs->wklF1[i].sem, 1);
if (spurs->flags1 & SF1_32_WORKLOADS)
{
sys_semaphore_post((u32)spurs->wklF2[i].sem, 1);
sys_semaphore_post(ppu, (u32)spurs->wklF2[i].sem, 1);
}
}
}
@ -747,7 +747,7 @@ void _spurs::event_helper_entry(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
}
else if (data0 == 2)
{
CHECK_SUCCESS(sys_semaphore_post((u32)spurs->semPrv, 1));
CHECK_SUCCESS(sys_semaphore_post(ppu, (u32)spurs->semPrv, 1));
}
else if (data0 == 3)
{
@ -775,7 +775,7 @@ s32 _spurs::create_event_helper(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 p
return CELL_SPURS_CORE_ERROR_AGAIN;
}
sys_event_queue_destroy(spurs->eventQueue, SYS_EVENT_QUEUE_DESTROY_FORCE);
sys_event_queue_destroy(ppu, spurs->eventQueue, SYS_EVENT_QUEUE_DESTROY_FORCE);
return CELL_SPURS_CORE_ERROR_AGAIN;
}
@ -788,7 +788,7 @@ s32 _spurs::create_event_helper(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 p
return CELL_SPURS_CORE_ERROR_STAT;
}
sys_event_queue_destroy(spurs->eventQueue, SYS_EVENT_QUEUE_DESTROY_FORCE);
sys_event_queue_destroy(ppu, spurs->eventQueue, SYS_EVENT_QUEUE_DESTROY_FORCE);
return CELL_SPURS_CORE_ERROR_STAT;
}
@ -814,7 +814,7 @@ s32 _spurs::create_event_helper(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 p
return CELL_SPURS_CORE_ERROR_STAT;
}
sys_event_queue_destroy(spurs->eventQueue, SYS_EVENT_QUEUE_DESTROY_FORCE);
sys_event_queue_destroy(ppu, spurs->eventQueue, SYS_EVENT_QUEUE_DESTROY_FORCE);
return CELL_SPURS_CORE_ERROR_STAT;
}
@ -839,13 +839,13 @@ s32 _spurs::add_default_syswkl(vm::ptr<CellSpurs> spurs, vm::cptr<u8> swlPriorit
return CELL_OK;
}
s32 _spurs::finalize_spu(vm::ptr<CellSpurs> spurs)
s32 _spurs::finalize_spu(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
{
if (spurs->flags & SAF_UNKNOWN_FLAG_7 || spurs->flags & SAF_UNKNOWN_FLAG_8)
{
while (true)
{
CHECK_SUCCESS(sys_spu_thread_group_join(spurs->spuTG, vm::null, vm::null));
CHECK_SUCCESS(sys_spu_thread_group_join(ppu, spurs->spuTG, vm::null, vm::null));
if (s32 rc = sys_spu_thread_group_destroy(spurs->spuTG))
{
@ -880,7 +880,7 @@ s32 _spurs::stop_event_helper(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
return CELL_SPURS_CORE_ERROR_STAT;
}
if (sys_event_port_send(spurs->eventPort, 0, 1, 0) != CELL_OK)
if (sys_event_port_send(ppu, spurs->eventPort, 0, 1, 0) != CELL_OK)
{
return CELL_SPURS_CORE_ERROR_STAT;
}
@ -895,7 +895,7 @@ s32 _spurs::stop_event_helper(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
CHECK_SUCCESS(sys_event_port_disconnect(spurs->eventPort));
CHECK_SUCCESS(sys_event_port_destroy(spurs->eventPort));
CHECK_SUCCESS(_spurs::detach_lv2_eq(spurs, spurs->spuPort, true));
CHECK_SUCCESS(sys_event_queue_destroy(spurs->eventQueue, SYS_EVENT_QUEUE_DESTROY_FORCE));
CHECK_SUCCESS(sys_event_queue_destroy(ppu, spurs->eventQueue, SYS_EVENT_QUEUE_DESTROY_FORCE));
return CELL_OK;
}
@ -1141,7 +1141,7 @@ s32 _spurs::initialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 revision,
// Create a mutex to protect access to SPURS handler thread data
if (s32 rc = sys_lwmutex_create(lwMutex, vm::make_var(sys_lwmutex_attribute_t{ SYS_SYNC_PRIORITY, SYS_SYNC_NOT_RECURSIVE, "_spuPrv" })))
{
_spurs::finalize_spu(spurs);
_spurs::finalize_spu(ppu, spurs);
return rollback(), rc;
}
@ -1149,7 +1149,7 @@ s32 _spurs::initialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 revision,
if (s32 rc = sys_lwcond_create(lwCond, lwMutex, vm::make_var(sys_lwcond_attribute_t{ "_spuPrv" })))
{
sys_lwmutex_destroy(ppu, lwMutex);
_spurs::finalize_spu(spurs);
_spurs::finalize_spu(ppu, spurs);
return rollback(), rc;
}
@ -1166,7 +1166,7 @@ s32 _spurs::initialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 revision,
{
sys_lwcond_destroy(lwCond);
sys_lwmutex_destroy(ppu, lwMutex);
_spurs::finalize_spu(spurs);
_spurs::finalize_spu(ppu, spurs);
return rollback(), rc;
}
@ -1176,7 +1176,7 @@ s32 _spurs::initialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 revision,
_spurs::stop_event_helper(ppu, spurs);
sys_lwcond_destroy(lwCond);
sys_lwmutex_destroy(ppu, lwMutex);
_spurs::finalize_spu(spurs);
_spurs::finalize_spu(ppu, spurs);
return rollback(), rc;
}
@ -1188,7 +1188,7 @@ s32 _spurs::initialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 revision,
_spurs::stop_event_helper(ppu, spurs);
sys_lwcond_destroy(lwCond);
sys_lwmutex_destroy(ppu, lwMutex);
_spurs::finalize_spu(spurs);
_spurs::finalize_spu(ppu, spurs);
return rollback(), rc;
}
@ -2787,7 +2787,7 @@ s32 cellSpursEventFlagSet(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag
// Signal the PPU thread to be woken up
eventFlag->pendingRecvTaskEvents[ppuWaitSlot] = ppuEvents;
CHECK_SUCCESS(sys_event_port_send(eventFlag->eventPortId, 0, 0, 0));
CHECK_SUCCESS(sys_event_port_send(ppu, eventFlag->eventPortId, 0, 0, 0));
}
if (pendingRecv)
@ -3067,7 +3067,7 @@ s32 cellSpursEventFlagAttachLv2EventQueue(ppu_thread& ppu, vm::ptr<CellSpursEven
if (_spurs::detach_lv2_eq(spurs, *port, true) == CELL_OK)
{
sys_event_queue_destroy(*eventQueueId, SYS_EVENT_QUEUE_DESTROY_FORCE);
sys_event_queue_destroy(ppu, *eventQueueId, SYS_EVENT_QUEUE_DESTROY_FORCE);
}
return failure(rc);
@ -3077,7 +3077,7 @@ s32 cellSpursEventFlagAttachLv2EventQueue(ppu_thread& ppu, vm::ptr<CellSpursEven
}
/// Detach an LV2 event queue from SPURS event flag
s32 cellSpursEventFlagDetachLv2EventQueue(vm::ptr<CellSpursEventFlag> eventFlag)
s32 cellSpursEventFlagDetachLv2EventQueue(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag)
{
cellSpurs.warning("cellSpursEventFlagDetachLv2EventQueue(eventFlag=*0x%x)", eventFlag);
@ -3131,7 +3131,7 @@ s32 cellSpursEventFlagDetachLv2EventQueue(vm::ptr<CellSpursEventFlag> eventFlag)
if (rc == CELL_OK)
{
rc = sys_event_queue_destroy(eventFlag->eventQueueId, SYS_EVENT_QUEUE_DESTROY_FORCE);
rc = sys_event_queue_destroy(ppu, eventFlag->eventQueueId, SYS_EVENT_QUEUE_DESTROY_FORCE);
}
return CELL_OK;

View file

@ -2,6 +2,7 @@
#include "Emu/System.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/lv2/sys_sync.h"
extern "C"
{
@ -77,8 +78,8 @@ struct vdec_thread : ppu_thread
std::mutex mutex;
std::queue<vdec_frame> out;
vdec_thread(s32 type, u32 profile, u32 addr, u32 size, vm::ptr<CellVdecCbMsg> func, u32 arg)
: ppu_thread("HLE Video Decoder")
vdec_thread(s32 type, u32 profile, u32 addr, u32 size, vm::ptr<CellVdecCbMsg> func, u32 arg, u32 prio, u32 stack)
: ppu_thread("HLE Video Decoder", prio, stack)
, type(type)
, profile(profile)
, mem_addr(addr)
@ -327,6 +328,7 @@ struct vdec_thread : ppu_thread
std::lock_guard<std::mutex>{mutex}, out.push(std::move(frame));
cb_func(*this, id, CELL_VDEC_MSG_TYPE_PICOUT, CELL_OK, cb_arg);
lv2_obj::sleep(*this, -1);
}
if (vcmd == vdec_cmd::decode)
@ -336,6 +338,7 @@ struct vdec_thread : ppu_thread
}
cb_func(*this, id, vcmd == vdec_cmd::decode ? CELL_VDEC_MSG_TYPE_AUDONE : CELL_VDEC_MSG_TYPE_SEQDONE, CELL_OK, cb_arg);
lv2_obj::sleep(*this, -1);
while (std::lock_guard<std::mutex>{mutex}, out.size() > 60)
{
@ -405,7 +408,7 @@ s32 cellVdecOpen(vm::cptr<CellVdecType> type, vm::cptr<CellVdecResource> res, vm
cellVdec.warning("cellVdecOpen(type=*0x%x, res=*0x%x, cb=*0x%x, handle=*0x%x)", type, res, cb, handle);
// Create decoder thread
auto&& vdec = idm::make_ptr<ppu_thread, vdec_thread>(type->codecType, type->profileLevel, res->memAddr, res->memSize, cb->cbFunc, cb->cbArg);
auto&& vdec = idm::make_ptr<ppu_thread, vdec_thread>(type->codecType, type->profileLevel, res->memAddr, res->memSize, cb->cbFunc, cb->cbArg, res->ppuThreadPriority, res->ppuThreadStackSize);
// Hack: store thread id (normally it should be pointer)
*handle = vdec->id;
@ -420,7 +423,7 @@ s32 cellVdecOpenEx(vm::cptr<CellVdecTypeEx> type, vm::cptr<CellVdecResourceEx> r
cellVdec.warning("cellVdecOpenEx(type=*0x%x, res=*0x%x, cb=*0x%x, handle=*0x%x)", type, res, cb, handle);
// Create decoder thread
auto&& vdec = idm::make_ptr<ppu_thread, vdec_thread>(type->codecType, type->profileLevel, res->memAddr, res->memSize, cb->cbFunc, cb->cbArg);
auto&& vdec = idm::make_ptr<ppu_thread, vdec_thread>(type->codecType, type->profileLevel, res->memAddr, res->memSize, cb->cbFunc, cb->cbArg, res->ppuThreadPriority, res->ppuThreadStackSize);
// Hack: store thread id (normally it should be pointer)
*handle = vdec->id;
@ -430,7 +433,7 @@ s32 cellVdecOpenEx(vm::cptr<CellVdecTypeEx> type, vm::cptr<CellVdecResourceEx> r
return CELL_OK;
}
s32 cellVdecClose(u32 handle)
s32 cellVdecClose(ppu_thread& ppu, u32 handle)
{
cellVdec.warning("cellVdecClose(handle=0x%x)", handle);
@ -441,6 +444,7 @@ s32 cellVdecClose(u32 handle)
return CELL_VDEC_ERROR_ARG;
}
lv2_obj::sleep(ppu, -1);
vdec->cmd_push({vdec_cmd::close, 0});
vdec->notify();
vdec->join();

View file

@ -2,6 +2,7 @@
#include "Emu/System.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/lv2/sys_sync.h"
#include "cellAudio.h"
#include "libmixer.h"
@ -347,6 +348,7 @@ struct surmixer_thread : ppu_thread
if (g_surmx.cb)
{
g_surmx.cb(*this, g_surmx.cb_arg, (u32)g_surmx.mixcount, 256);
lv2_obj::sleep(*this, -1);
}
//u64 stamp1 = get_system_time();

View file

@ -56,7 +56,7 @@ s32 sys_lwcond_signal(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
lwmutex->all_info++;
// call the syscall
if (s32 res = _sys_lwcond_signal(lwcond->lwcond_queue, lwmutex->sleep_queue, -1, 1))
if (s32 res = _sys_lwcond_signal(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, -1, 1))
{
lwmutex->all_info--;
@ -76,14 +76,14 @@ s32 sys_lwcond_signal(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
}
// call the syscall
return _sys_lwcond_signal(lwcond->lwcond_queue, 0, -1, 2);
return _sys_lwcond_signal(ppu, lwcond->lwcond_queue, 0, -1, 2);
}
// if locking succeeded
lwmutex->all_info++;
// call the syscall
if (s32 res = _sys_lwcond_signal(lwcond->lwcond_queue, lwmutex->sleep_queue, -1, 3))
if (s32 res = _sys_lwcond_signal(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, -1, 3))
{
lwmutex->all_info--;
@ -111,7 +111,7 @@ s32 sys_lwcond_signal_all(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
if (lwmutex->vars.owner.load() == ppu.id)
{
// if owns the mutex, call the syscall
const s32 res = _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 1);
const s32 res = _sys_lwcond_signal_all(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, 1);
if (res <= 0)
{
@ -134,11 +134,11 @@ s32 sys_lwcond_signal_all(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
}
// call the syscall
return _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 2);
return _sys_lwcond_signal_all(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, 2);
}
// if locking succeeded, call the syscall
s32 res = _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 1);
s32 res = _sys_lwcond_signal_all(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, 1);
if (res > 0)
{
@ -171,7 +171,7 @@ s32 sys_lwcond_signal_to(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond, u32 ppu_
lwmutex->all_info++;
// call the syscall
if (s32 res = _sys_lwcond_signal(lwcond->lwcond_queue, lwmutex->sleep_queue, ppu_thread_id, 1))
if (s32 res = _sys_lwcond_signal(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, ppu_thread_id, 1))
{
lwmutex->all_info--;
@ -191,14 +191,14 @@ s32 sys_lwcond_signal_to(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond, u32 ppu_
}
// call the syscall
return _sys_lwcond_signal(lwcond->lwcond_queue, 0, ppu_thread_id, 2);
return _sys_lwcond_signal(ppu, lwcond->lwcond_queue, 0, ppu_thread_id, 2);
}
// if locking succeeded
lwmutex->all_info++;
// call the syscall
if (s32 res = _sys_lwcond_signal(lwcond->lwcond_queue, lwmutex->sleep_queue, ppu_thread_id, 3))
if (s32 res = _sys_lwcond_signal(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, ppu_thread_id, 3))
{
lwmutex->all_info--;

View file

@ -273,7 +273,7 @@ s32 sys_lwmutex_unlock(ppu_thread& ppu, vm::ptr<sys_lwmutex_t> lwmutex)
lwmutex->vars.owner.exchange(lwmutex_reserved);
// call the syscall
if (_sys_lwmutex_unlock(lwmutex->sleep_queue) == CELL_ESRCH)
if (_sys_lwmutex_unlock(ppu, lwmutex->sleep_queue) == CELL_ESRCH)
{
return CELL_ESRCH;
}

View file

@ -12,7 +12,7 @@ extern logs::channel sysPrxForUser;
extern u32 ppu_alloc_tls();
extern void ppu_free_tls(u32 addr);
s32 sys_ppu_thread_create(vm::ptr<u64> thread_id, u32 entry, u64 arg, s32 prio, u32 stacksize, u64 flags, vm::cptr<char> threadname)
s32 sys_ppu_thread_create(ppu_thread& ppu, vm::ptr<u64> thread_id, u32 entry, u64 arg, s32 prio, u32 stacksize, u64 flags, vm::cptr<char> threadname)
{
sysPrxForUser.warning("sys_ppu_thread_create(thread_id=*0x%x, entry=0x%x, arg=0x%llx, prio=%d, stacksize=0x%x, flags=0x%llx, threadname=%s)",
thread_id, entry, arg, prio, stacksize, flags, threadname);
@ -37,7 +37,7 @@ s32 sys_ppu_thread_create(vm::ptr<u64> thread_id, u32 entry, u64 arg, s32 prio,
}
// Run the thread
if (s32 res = sys_ppu_thread_start(static_cast<u32>(*thread_id)))
if (s32 res = sys_ppu_thread_start(ppu, static_cast<u32>(*thread_id)))
{
return res;
}
@ -45,6 +45,8 @@ s32 sys_ppu_thread_create(vm::ptr<u64> thread_id, u32 entry, u64 arg, s32 prio,
// Dirty hack for sound: confirm the creation of _mxr000 event queue
if (threadname && std::memcmp(threadname.get_ptr(), "_cellsurMixerMain", 18) == 0)
{
lv2_obj::sleep(ppu, -1);
while (!idm::select<lv2_obj, lv2_event_queue>([](u32, lv2_event_queue& eq)
{
return eq.name == "_mxr000\0"_u64;

View file

@ -8,6 +8,7 @@
#include "PPUInterpreter.h"
#include "PPUAnalyser.h"
#include "PPUModule.h"
#include "lv2/sys_sync.h"
#ifdef LLVM_AVAILABLE
#include "restore_new.h"
@ -195,7 +196,7 @@ std::string ppu_thread::get_name() const
std::string ppu_thread::dump() const
{
std::string ret = cpu_thread::dump();
ret += fmt::format("Priority: %d\n", prio);
ret += fmt::format("Priority: %d\n", +prio);
ret += fmt::format("Last function: %s\n", last_function ? last_function : "");
ret += "\nRegisters:\n=========\n";
@ -286,6 +287,11 @@ void ppu_thread::cpu_task()
cmd_pop(), ppu_initialize();
break;
}
case ppu_cmd::sleep:
{
cmd_pop(), lv2_obj::sleep(*this, -1);
break;
}
default:
{
fmt::throw_exception("Unknown ppu_cmd(0x%x)" HERE, (u32)type);
@ -402,6 +408,9 @@ ppu_thread::ppu_thread(const std::string& name, u32 prio, u32 stack)
}
gpr[1] = ::align(stack_addr + stack_size, 0x200) - 0x200;
// Trigger the scheduler
state += cpu_flag::suspend;
}
void ppu_thread::cmd_push(cmd64 cmd)
@ -449,7 +458,7 @@ cmd64 ppu_thread::cmd_wait()
{
if (UNLIKELY(test(state)))
{
if (check_state())
if (test(state, cpu_flag::stop + cpu_flag::exit))
{
return cmd64{};
}

View file

@ -15,6 +15,7 @@ enum class ppu_cmd : u32
lle_call, // Load addr and rtoc at *arg or *gpr[arg] and execute
hle_call, // Execute function by index (arg)
initialize, // ppu_initialize()
sleep,
};
class ppu_thread : public cpu_thread
@ -27,6 +28,7 @@ public:
virtual std::string get_name() const override;
virtual std::string dump() const override;
virtual void cpu_task() override;
virtual void cpu_sleep() override;
virtual ~ppu_thread() override;
ppu_thread(const std::string& name, u32 prio = 0, u32 stack = 0x10000);
@ -117,7 +119,7 @@ public:
u64 ctr{}; // Counter Register
u32 vrsave{0xffffffff}; // VR Save Register (almost unused)
u32 prio = 0; // Thread priority (0..3071)
atomic_t<u32> prio{0}; // Thread priority (0..3071)
const u32 stack_size; // Stack size
const u32 stack_addr; // Stack address

View file

@ -1015,3 +1015,179 @@ extern ppu_function_t ppu_get_syscall(u64 code)
return nullptr;
}
extern u64 get_system_time();
DECLARE(lv2_obj::g_mutex);
DECLARE(lv2_obj::g_ppu);
DECLARE(lv2_obj::g_pending);
DECLARE(lv2_obj::g_waiting);
// Amount of PPU threads running simultaneously (must be 2)
cfg::int_entry<1, 16> g_cfg_ppu_threads(cfg::root.core, "PPU Threads", 2);
// Put a thread to sleep on the LV2 scheduler, optionally registering a timeout.
// wait_until: absolute wake-up time; values -1/-2 denote "no timeout" (the u64
// comparison `wait_until < -2` excludes exactly those sentinels — note -2 wraps
// to 0xFFFF...FE as u64). TODO confirm the intended meaning of -2 vs -1.
void lv2_obj::sleep(named_thread& thread, u64 wait_until)
{
	// All scheduler state (g_ppu, g_pending, g_waiting) is protected by g_mutex
	semaphore_lock lock(g_mutex);

	// Only PPU threads participate in the run queue; other thread types just
	// get a timeout registration below
	if (auto ppu = dynamic_cast<ppu_thread*>(&thread))
	{
		sys_ppu_thread.trace("sleep() - waiting (%zu)", g_pending.size());

		// Atomically set cpu_flag::suspend, but only if no signal is pending
		// (a pending signal means the wake-up already happened)
		auto state = ppu->state.fetch_op([&](auto& val)
		{
			if (!test(val, cpu_flag::signal))
			{
				val += cpu_flag::suspend;
			}
		});

		if (test(state, cpu_flag::signal))
		{
			sys_ppu_thread.error("sleep() failed (signaled)");
		}

		// Find and remove the thread
		unqueue(g_ppu, ppu);
		unqueue(g_pending, ppu);
	}

	if (wait_until < -2)
	{
		// Register timeout if necessary; g_waiting is kept sorted by wake-up
		// time, so insert before the first later entry.
		// NOTE(review): this path returns WITHOUT calling schedule_all() —
		// presumably intentional (nothing was freed up), but worth confirming.
		for (auto it = g_waiting.begin(), end = g_waiting.end(); it != end; it++)
		{
			if (it->first > wait_until)
			{
				g_waiting.emplace(it, wait_until, &thread);
				return;
			}
		}

		g_waiting.emplace_back(wait_until, &thread);
	}

	// A slot in the run queue may have been vacated: try to wake someone
	schedule_all();
}
// Make a (PPU) thread runnable: insert it into the priority-ordered run queue
// g_ppu, cancel any registered timeout, enforce the configured limit of
// simultaneously running PPU threads, then run the scheduler.
// prio: special values select a mode — -4 is a "yield" command; any value
// < INT32_MAX re-inserts with a new priority. TODO confirm the default prio
// used by callers such as ppu_thread::cpu_sleep() (not visible in this chunk).
void lv2_obj::awake(cpu_thread& cpu, u32 prio)
{
	// Check thread type (id_type() == 1 presumably identifies PPU threads)
	if (cpu.id_type() != 1) return;

	semaphore_lock lock(g_mutex);

	if (prio == -4)
	{
		// Yield command: drop from both queues so the re-insert below places
		// the thread behind its priority peers (FIFO order is preserved)
		unqueue(g_ppu, &cpu);
		unqueue(g_pending, &cpu);
	}

	if (prio < INT32_MAX && !unqueue(g_ppu, &cpu))
	{
		// Priority set: thread was not queued, nothing further to do
		return;
	}

	// Emplace current thread (linear scan; queue is sorted by prio, FIFO within)
	for (std::size_t i = 0; i <= g_ppu.size(); i++)
	{
		if (i < g_ppu.size() && g_ppu[i] == &cpu)
		{
			// Already queued — note the trace label says "sleep()" although we
			// are in awake(); kept as-is (runtime string), but looks mislabelled
			sys_ppu_thread.trace("sleep() - suspended (p=%zu)", g_pending.size());
			break;
		}

		// Use priority, also preserve FIFO order
		if (i == g_ppu.size() || g_ppu[i]->prio > static_cast<ppu_thread&>(cpu).prio)
		{
			sys_ppu_thread.trace("awake(): %s", cpu.id);
			g_ppu.insert(g_ppu.cbegin() + i, &static_cast<ppu_thread&>(cpu));

			// Unregister timeout if necessary (thread is awake, timer is moot)
			for (auto it = g_waiting.cbegin(), end = g_waiting.cend(); it != end; it++)
			{
				if (it->second == &cpu)
				{
					g_waiting.erase(it);
					break;
				}
			}

			break;
		}
	}

	// Remove pending if necessary — only when called from the thread itself
	// (cpu.get() vs thread_ctrl::get_current(); presumably the native handle)
	if (!g_pending.empty() && cpu.get() == thread_ctrl::get_current())
	{
		unqueue(g_pending, &cpu);
	}

	// Suspend threads if necessary: everything beyond the configured number of
	// concurrently running PPU threads is marked suspended and parked in
	// g_pending until schedule_all() can run it
	for (std::size_t i = g_cfg_ppu_threads; i < g_ppu.size(); i++)
	{
		const auto target = g_ppu[i];

		if (!target->state.test_and_set(cpu_flag::suspend))
		{
			sys_ppu_thread.trace("suspend(): %s", target->id);
			g_pending.emplace_back(target);
		}
	}

	schedule_all();
}
// Reset the PPU scheduler: discard every registered timeout, every parked
// (pending) thread and the whole run queue. The three containers are
// independent, so the clearing order is irrelevant.
void lv2_obj::cleanup()
{
	g_waiting.clear();
	g_pending.clear();
	g_ppu.clear();
}
// Core scheduling step: wake up to g_cfg_ppu_threads runnable threads from the
// front of the run queue, then fire any expired timeouts. Caller is expected to
// hold g_mutex (every visible caller in this chunk does).
void lv2_obj::schedule_all()
{
	// Only schedule when no thread is still pending suspension, so the running
	// count never exceeds the configured limit
	if (g_pending.empty())
	{
		// Wake up threads
		for (std::size_t i = 0, x = std::min<std::size_t>(g_cfg_ppu_threads, g_ppu.size()); i < x; i++)
		{
			const auto target = g_ppu[i];

			if (test(target->state, cpu_flag::suspend))
			{
				sys_ppu_thread.trace("schedule(): %s", target->id);
				// Flip both flags at once: clears suspend (known set) and sets
				// signal — assumes signal is clear here, otherwise the XOR
				// would clear it instead. TODO confirm that invariant.
				target->state ^= (cpu_flag::signal + cpu_flag::suspend);

				// No need to notify ourselves
				if (target->get() != thread_ctrl::get_current())
				{
					target->notify();
				}
			}
		}
	}

	// Check registered timeouts (g_waiting is kept sorted by wake-up time)
	while (!g_waiting.empty())
	{
		auto& pair = g_waiting.front();

		if (pair.first <= get_system_time())
		{
			// Expired: wake the waiter; it removes itself from the queues
			pair.second->notify();
			g_waiting.pop_front();
		}
		else
		{
			// The list is sorted so assume no more timeouts
			break;
		}
	}
}
// cpu_flag::suspend callback (invoked from cpu_thread::check_state): hand this
// thread back to the LV2 scheduler so it is re-queued and eventually resumed.
// NOTE(review): relies on awake()'s default prio argument, which is declared
// outside this chunk — confirm it selects the plain "re-insert" path.
void ppu_thread::cpu_sleep()
{
	lv2_obj::awake(*this);
}

View file

@ -68,7 +68,7 @@ error_code sys_cond_destroy(u32 cond_id)
return CELL_OK;
}
error_code sys_cond_signal(u32 cond_id)
error_code sys_cond_signal(ppu_thread& ppu, u32 cond_id)
{
sys_cond.trace("sys_cond_signal(cond_id=0x%x)", cond_id);
@ -99,13 +99,14 @@ error_code sys_cond_signal(u32 cond_id)
if (cond.ret)
{
cond.ret->set_signal();
cond->awake(*cond.ret);
ppu.check_state();
}
return CELL_OK;
}
error_code sys_cond_signal_all(u32 cond_id)
error_code sys_cond_signal_all(ppu_thread& ppu, u32 cond_id)
{
sys_cond.trace("sys_cond_signal_all(cond_id=0x%x)", cond_id);
@ -138,13 +139,14 @@ error_code sys_cond_signal_all(u32 cond_id)
if (cond.ret)
{
cond.ret->set_signal();
cond->awake(*cond.ret);
ppu.check_state();
}
return CELL_OK;
}
error_code sys_cond_signal_to(u32 cond_id, u32 thread_id)
error_code sys_cond_signal_to(ppu_thread& ppu, u32 cond_id, u32 thread_id)
{
sys_cond.trace("sys_cond_signal_to(cond_id=0x%x, thread_id=0x%x)", cond_id, thread_id);
@ -180,7 +182,8 @@ error_code sys_cond_signal_to(u32 cond_id, u32 thread_id)
if (cond.ret && cond.ret != (cpu_thread*)(1))
{
cond.ret->set_signal();
cond->awake(*cond.ret);
ppu.check_state();
}
else if (!cond.ret)
{
@ -223,6 +226,7 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
// Register waiter
cond->sq.emplace_back(&ppu);
cond->sleep(ppu, start_time, timeout);
// Unlock the mutex
cond->mutex->lock_count = 0;
@ -232,8 +236,6 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
ppu.gpr[3] = CELL_OK;
}
// SLEEP
while (!ppu.state.test_and_reset(cpu_flag::signal))
{
if (timeout)
@ -276,6 +278,8 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
// Restore the recursive value
cond->mutex->lock_count = cond.ret;
ppu.check_state();
if (ppu.gpr[3] == CELL_ETIMEDOUT)
{
return not_an_error(CELL_ETIMEDOUT);

View file

@ -48,6 +48,6 @@ class ppu_thread;
error_code sys_cond_create(vm::ps3::ptr<u32> cond_id, u32 mutex_id, vm::ps3::ptr<sys_cond_attribute_t> attr);
error_code sys_cond_destroy(u32 cond_id);
error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout);
error_code sys_cond_signal(u32 cond_id);
error_code sys_cond_signal_all(u32 cond_id);
error_code sys_cond_signal_to(u32 cond_id, u32 thread_id);
error_code sys_cond_signal(ppu_thread& ppu, u32 cond_id);
error_code sys_cond_signal_all(ppu_thread& ppu, u32 cond_id);
error_code sys_cond_signal_to(ppu_thread& ppu, u32 cond_id, u32 thread_id);

View file

@ -52,7 +52,7 @@ bool lv2_event_queue::send(lv2_event event)
std::tie(ppu.gpr[4], ppu.gpr[5], ppu.gpr[6], ppu.gpr[7]) = event;
ppu.set_signal();
awake(ppu);
}
else
{
@ -67,7 +67,8 @@ bool lv2_event_queue::send(lv2_event event)
const u32 data3 = static_cast<u32>(std::get<3>(event));
spu.ch_in_mbox.set_values(4, CELL_OK, data1, data2, data3);
spu.set_signal();
spu.state += cpu_flag::signal;
spu.notify();
}
return true;
@ -131,7 +132,7 @@ error_code sys_event_queue_create(vm::ptr<u32> equeue_id, vm::ptr<sys_event_queu
return CELL_EAGAIN;
}
error_code sys_event_queue_destroy(u32 equeue_id, s32 mode)
error_code sys_event_queue_destroy(ppu_thread& ppu, u32 equeue_id, s32 mode)
{
sys_event.warning("sys_event_queue_destroy(equeue_id=0x%x, mode=%d)", equeue_id, mode);
@ -171,16 +172,19 @@ error_code sys_event_queue_destroy(u32 equeue_id, s32 mode)
if (queue->type == SYS_PPU_QUEUE)
{
static_cast<ppu_thread&>(*cpu).gpr[3] = CELL_ECANCELED;
queue->awake(*cpu);
}
else
{
static_cast<SPUThread&>(*cpu).ch_in_mbox.set_values(1, CELL_ECANCELED);
cpu->state += cpu_flag::signal;
cpu->notify();
}
cpu->set_signal();
}
}
ppu.check_state();
return CELL_OK;
}
@ -236,6 +240,7 @@ error_code sys_event_queue_receive(ppu_thread& ppu, u32 equeue_id, vm::ptr<sys_e
if (queue.events.empty())
{
queue.sq.emplace_back(&ppu);
queue.sleep(ppu, start_time, timeout);
return CELL_EBUSY;
}
@ -280,7 +285,8 @@ error_code sys_event_queue_receive(ppu_thread& ppu, u32 equeue_id, vm::ptr<sys_e
continue;
}
return not_an_error(CELL_ETIMEDOUT);
ppu.gpr[3] = CELL_ETIMEDOUT;
break;
}
thread_ctrl::wait_for(timeout - passed);
@ -291,7 +297,8 @@ error_code sys_event_queue_receive(ppu_thread& ppu, u32 equeue_id, vm::ptr<sys_e
}
}
return not_an_error(ppu.gpr[3] ? CELL_ECANCELED : CELL_OK);
ppu.check_state();
return not_an_error(ppu.gpr[3]);
}
error_code sys_event_queue_drain(u32 equeue_id)
@ -412,7 +419,7 @@ error_code sys_event_port_disconnect(u32 eport_id)
return CELL_OK;
}
error_code sys_event_port_send(u32 eport_id, u64 data1, u64 data2, u64 data3)
error_code sys_event_port_send(ppu_thread& ppu, u32 eport_id, u64 data1, u64 data2, u64 data3)
{
sys_event.trace("sys_event_port_send(eport_id=0x%x, data1=0x%llx, data2=0x%llx, data3=0x%llx)", eport_id, data1, data2, data3);
@ -448,5 +455,6 @@ error_code sys_event_port_send(u32 eport_id, u64 data1, u64 data2, u64 data3)
return port.ret;
}
ppu.check_state();
return CELL_OK;
}

View file

@ -128,7 +128,7 @@ class ppu_thread;
// Syscalls
error_code sys_event_queue_create(vm::ps3::ptr<u32> equeue_id, vm::ps3::ptr<sys_event_queue_attribute_t> attr, u64 event_queue_key, s32 size);
error_code sys_event_queue_destroy(u32 equeue_id, s32 mode);
error_code sys_event_queue_destroy(ppu_thread& ppu, u32 equeue_id, s32 mode);
error_code sys_event_queue_receive(ppu_thread& ppu, u32 equeue_id, vm::ps3::ptr<sys_event_t> dummy_event, u64 timeout);
error_code sys_event_queue_tryreceive(u32 equeue_id, vm::ps3::ptr<sys_event_t> event_array, s32 size, vm::ps3::ptr<u32> number);
error_code sys_event_queue_drain(u32 event_queue_id);
@ -137,4 +137,4 @@ error_code sys_event_port_create(vm::ps3::ptr<u32> eport_id, s32 port_type, u64
error_code sys_event_port_destroy(u32 eport_id);
error_code sys_event_port_connect_local(u32 event_port_id, u32 event_queue_id);
error_code sys_event_port_disconnect(u32 eport_id);
error_code sys_event_port_send(u32 event_port_id, u64 data1, u64 data2, u64 data3);
error_code sys_event_port_send(ppu_thread& ppu, u32 event_port_id, u64 data1, u64 data2, u64 data3);

View file

@ -26,6 +26,11 @@ error_code sys_event_flag_create(vm::ptr<u32> id, vm::ptr<sys_event_flag_attribu
const u32 protocol = attr->protocol;
if (protocol == SYS_SYNC_RETRY)
sys_event_flag.todo("sys_event_flag_create(): SYS_SYNC_RETRY");
if (protocol == SYS_SYNC_PRIORITY_INHERIT)
sys_event_flag.todo("sys_event_flag_create(): SYS_SYNC_PRIORITY_INHERIT");
if (protocol != SYS_SYNC_FIFO && protocol != SYS_SYNC_RETRY && protocol != SYS_SYNC_PRIORITY && protocol != SYS_SYNC_PRIORITY_INHERIT)
{
sys_event_flag.error("sys_event_flag_create(): unknown protocol (0x%x)", protocol);
@ -89,6 +94,7 @@ error_code sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm
const u64 start_time = ppu.gpr[10] = get_system_time();
// Fix function arguments for external access
ppu.gpr[3] = -1;
ppu.gpr[4] = bitptn;
ppu.gpr[5] = mode;
ppu.gpr[6] = 0;
@ -124,6 +130,7 @@ error_code sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm
flag.waiters++;
flag.sq.emplace_back(&ppu);
flag.sleep(ppu, start_time, timeout);
return CELL_EBUSY;
});
@ -145,8 +152,6 @@ error_code sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm
return CELL_OK;
}
// SLEEP
while (!ppu.state.test_and_reset(cpu_flag::signal))
{
if (timeout)
@ -164,8 +169,9 @@ error_code sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm
}
flag->waiters--;
if (result) *result = flag->pattern;
return not_an_error(CELL_ETIMEDOUT);
ppu.gpr[3] = CELL_ETIMEDOUT;
ppu.gpr[6] = flag->pattern;
break;
}
thread_ctrl::wait_for(timeout - passed);
@ -176,8 +182,9 @@ error_code sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm
}
}
ppu.check_state();
if (result) *result = ppu.gpr[6];
return not_an_error(ppu.gpr[5] ? CELL_OK : CELL_ECANCELED);
return not_an_error(ppu.gpr[3]);
}
error_code sys_event_flag_trywait(u32 id, u64 bitptn, u32 mode, vm::ptr<u64> result)
@ -231,48 +238,63 @@ error_code sys_event_flag_set(u32 id, u64 bitptn)
return CELL_OK;
}
semaphore_lock lock(flag->mutex);
// Sort sleep queue in required order
if (flag->protocol != SYS_SYNC_FIFO)
if (true)
{
std::stable_sort(flag->sq.begin(), flag->sq.end(), [](cpu_thread* a, cpu_thread* b)
semaphore_lock lock(flag->mutex);
// Sort sleep queue in required order
if (flag->protocol != SYS_SYNC_FIFO)
{
return static_cast<ppu_thread*>(a)->prio < static_cast<ppu_thread*>(b)->prio;
std::stable_sort(flag->sq.begin(), flag->sq.end(), [](cpu_thread* a, cpu_thread* b)
{
return static_cast<ppu_thread*>(a)->prio < static_cast<ppu_thread*>(b)->prio;
});
}
// Process all waiters in single atomic op
flag->pattern.atomic_op([&](u64& value)
{
value |= bitptn;
for (auto cpu : flag->sq)
{
auto& ppu = static_cast<ppu_thread&>(*cpu);
const u64 pattern = ppu.gpr[4];
const u64 mode = ppu.gpr[5];
if (lv2_event_flag::check_pattern(value, pattern, mode, &ppu.gpr[6]))
{
ppu.gpr[3] = CELL_OK;
}
}
});
}
// Process all waiters in single atomic op
flag->pattern.atomic_op([&](u64& value)
{
value |= bitptn;
for (auto cpu : flag->sq)
// Remove waiters
const auto tail = std::remove_if(flag->sq.begin(), flag->sq.end(), [&](cpu_thread* cpu)
{
auto& ppu = static_cast<ppu_thread&>(*cpu);
const u64 pattern = ppu.gpr[4];
const u64 mode = ppu.gpr[5];
ppu.gpr[3] = lv2_event_flag::check_pattern(value, pattern, mode, &ppu.gpr[6]);
}
});
if (ppu.gpr[3] == CELL_OK)
{
flag->waiters--;
flag->awake(ppu);
return true;
}
// Remove waiters
const auto tail = std::remove_if(flag->sq.begin(), flag->sq.end(), [&](cpu_thread* cpu)
return false;
});
flag->sq.erase(tail, flag->sq.end());
}
if (auto cpu = get_current_cpu_thread())
{
auto& ppu = static_cast<ppu_thread&>(*cpu);
if (ppu.gpr[3])
if (cpu->id_type() == 1)
{
flag->waiters--;
ppu.set_signal();
return true;
static_cast<ppu_thread&>(*cpu).check_state();
}
return false;
});
flag->sq.erase(tail, flag->sq.end());
}
return CELL_OK;
}
@ -294,7 +316,7 @@ error_code sys_event_flag_clear(u32 id, u64 bitptn)
return CELL_OK;
}
error_code sys_event_flag_cancel(u32 id, vm::ptr<u32> num)
error_code sys_event_flag_cancel(ppu_thread& ppu, u32 id, vm::ptr<u32> num)
{
sys_event_flag.trace("sys_event_flag_cancel(id=0x%x, num=*0x%x)", id, num);
@ -307,26 +329,33 @@ error_code sys_event_flag_cancel(u32 id, vm::ptr<u32> num)
return CELL_ESRCH;
}
semaphore_lock lock(flag->mutex);
// Get current pattern
const u64 pattern = flag->pattern;
// Set count
*num = ::size32(flag->sq);
// Signal all threads to return CELL_ECANCELED
while (auto thread = flag->schedule<ppu_thread>(flag->sq, flag->protocol))
u32 value = 0;
{
auto& ppu = static_cast<ppu_thread&>(*thread);
semaphore_lock lock(flag->mutex);
ppu.gpr[4] = pattern;
ppu.gpr[5] = 0;
// Get current pattern
const u64 pattern = flag->pattern;
thread->set_signal();
flag->waiters--;
// Set count
value = ::size32(flag->sq);
// Signal all threads to return CELL_ECANCELED
while (auto thread = flag->schedule<ppu_thread>(flag->sq, flag->protocol))
{
auto& ppu = static_cast<ppu_thread&>(*thread);
ppu.gpr[3] = CELL_ECANCELED;
ppu.gpr[6] = pattern;
flag->waiters--;
flag->awake(ppu);
}
}
if (num) *num = value;
ppu.check_state();
return CELL_OK;
}

View file

@ -118,5 +118,5 @@ error_code sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm
error_code sys_event_flag_trywait(u32 id, u64 bitptn, u32 mode, vm::ps3::ptr<u64> result);
error_code sys_event_flag_set(u32 id, u64 bitptn);
error_code sys_event_flag_clear(u32 id, u64 bitptn);
error_code sys_event_flag_cancel(u32 id, vm::ps3::ptr<u32> num);
error_code sys_event_flag_cancel(ppu_thread& ppu, u32 id, vm::ps3::ptr<u32> num);
error_code sys_event_flag_get(u32 id, vm::ps3::ptr<u64> flags);

View file

@ -18,6 +18,7 @@ void lv2_int_serv::exec()
({
{ ppu_cmd::set_args, 2 }, arg1, arg2,
{ ppu_cmd::lle_call, 2 },
{ ppu_cmd::sleep, 0 }
});
thread->notify();

View file

@ -60,7 +60,7 @@ error_code _sys_lwcond_destroy(u32 lwcond_id)
return CELL_OK;
}
error_code _sys_lwcond_signal(u32 lwcond_id, u32 lwmutex_id, u32 ppu_thread_id, u32 mode)
error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u32 ppu_thread_id, u32 mode)
{
sys_lwcond.trace("_sys_lwcond_signal(lwcond_id=0x%x, lwmutex_id=0x%x, ppu_thread_id=0x%x, mode=%d)", lwcond_id, lwmutex_id, ppu_thread_id, mode);
@ -106,7 +106,10 @@ error_code _sys_lwcond_signal(u32 lwcond_id, u32 lwmutex_id, u32 ppu_thread_id,
{
cond.waiters--;
static_cast<ppu_thread*>(result)->gpr[3] = mode == 2;
if (mode == 2)
{
static_cast<ppu_thread*>(result)->gpr[3] = CELL_EBUSY;
}
if (mode != 2 && !mutex->signaled.fetch_op([](u32& v) { if (v) v--; }))
{
@ -129,7 +132,8 @@ error_code _sys_lwcond_signal(u32 lwcond_id, u32 lwmutex_id, u32 ppu_thread_id,
if (cond.ret)
{
cond.ret->set_signal();
cond->awake(*cond.ret);
ppu.check_state();
}
else if (mode == 2)
{
@ -147,7 +151,7 @@ error_code _sys_lwcond_signal(u32 lwcond_id, u32 lwmutex_id, u32 ppu_thread_id,
return CELL_OK;
}
error_code _sys_lwcond_signal_all(u32 lwcond_id, u32 lwmutex_id, u32 mode)
error_code _sys_lwcond_signal_all(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u32 mode)
{
sys_lwcond.trace("_sys_lwcond_signal_all(lwcond_id=0x%x, lwmutex_id=0x%x, mode=%d)", lwcond_id, lwmutex_id, mode);
@ -177,7 +181,10 @@ error_code _sys_lwcond_signal_all(u32 lwcond_id, u32 lwmutex_id, u32 mode)
{
cond.waiters--;
static_cast<ppu_thread*>(cpu)->gpr[3] = mode == 2;
if (mode == 2)
{
static_cast<ppu_thread*>(cpu)->gpr[3] = CELL_EBUSY;
}
if (mode != 2 && !mutex->signaled.fetch_op([](u32& v) { if (v) v--; }))
{
@ -202,10 +209,14 @@ error_code _sys_lwcond_signal_all(u32 lwcond_id, u32 lwmutex_id, u32 mode)
return CELL_ESRCH;
}
// TODO: signal only one thread
for (auto cpu : threads)
{
cpu->set_signal();
cond->awake(*cpu);
}
if (threads.size())
{
ppu.check_state();
}
if (mode == 1)
@ -239,6 +250,7 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
// Add a waiter
cond.waiters++;
cond.sq.emplace_back(&ppu);
cond.sleep(ppu, start_time, timeout);
// Process lwmutex sleep queue
if (const auto cpu = mutex->schedule<ppu_thread>(mutex->sq, mutex->protocol))
@ -257,10 +269,10 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
if (cond.ret)
{
cond.ret->set_signal();
cond->awake(*cond.ret);
}
// SLEEP
ppu.gpr[3] = CELL_OK;
while (!ppu.state.test_and_reset(cpu_flag::signal))
{
@ -282,10 +294,12 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
if (mutex->signaled.fetch_op([](u32& v) { if (v) v--; }))
{
return not_an_error(CELL_EDEADLK);
ppu.gpr[3] = CELL_EDEADLK;
break;
}
return not_an_error(CELL_ETIMEDOUT);
ppu.gpr[3] = CELL_ETIMEDOUT;
break;
}
thread_ctrl::wait_for(timeout - passed);
@ -297,5 +311,6 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
}
// Return cause
return not_an_error(ppu.gpr[3] ? CELL_EBUSY : CELL_OK);
ppu.check_state();
return not_an_error(ppu.gpr[3]);
}

View file

@ -43,6 +43,6 @@ class ppu_thread;
error_code _sys_lwcond_create(vm::ps3::ptr<u32> lwcond_id, u32 lwmutex_id, vm::ps3::ptr<sys_lwcond_t> control, u64 name, u32 arg5);
error_code _sys_lwcond_destroy(u32 lwcond_id);
error_code _sys_lwcond_signal(u32 lwcond_id, u32 lwmutex_id, u32 ppu_thread_id, u32 mode);
error_code _sys_lwcond_signal_all(u32 lwcond_id, u32 lwmutex_id, u32 mode);
error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u32 ppu_thread_id, u32 mode);
error_code _sys_lwcond_signal_all(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u32 mode);
error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 timeout);

View file

@ -17,6 +17,9 @@ error_code _sys_lwmutex_create(vm::ptr<u32> lwmutex_id, u32 protocol, vm::ptr<sy
{
sys_lwmutex.warning("_sys_lwmutex_create(lwmutex_id=*0x%x, protocol=0x%x, control=*0x%x, arg4=0x%x, name=0x%llx, arg6=0x%x)", lwmutex_id, protocol, control, arg4, name, arg6);
if (protocol == SYS_SYNC_RETRY)
sys_lwmutex.todo("_sys_lwmutex_create(): SYS_SYNC_RETRY");
if (protocol != SYS_SYNC_FIFO && protocol != SYS_SYNC_RETRY && protocol != SYS_SYNC_PRIORITY)
{
sys_lwmutex.error("_sys_lwmutex_create(): unknown protocol (0x%x)", protocol);
@ -93,6 +96,7 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
}
mutex.sq.emplace_back(&ppu);
mutex.sleep(ppu, start_time, timeout);
return false;
});
@ -106,7 +110,7 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
return CELL_OK;
}
// SLEEP
ppu.gpr[3] = CELL_OK;
while (!ppu.state.test_and_reset(cpu_flag::signal))
{
@ -124,7 +128,8 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
continue;
}
return not_an_error(CELL_ETIMEDOUT);
ppu.gpr[3] = CELL_ETIMEDOUT;
break;
}
thread_ctrl::wait_for(timeout - passed);
@ -135,7 +140,8 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
}
}
return CELL_OK;
ppu.check_state();
return not_an_error(ppu.gpr[3]);
}
error_code _sys_lwmutex_trylock(u32 lwmutex_id)
@ -168,7 +174,7 @@ error_code _sys_lwmutex_trylock(u32 lwmutex_id)
return CELL_OK;
}
error_code _sys_lwmutex_unlock(u32 lwmutex_id)
error_code _sys_lwmutex_unlock(ppu_thread& ppu, u32 lwmutex_id)
{
sys_lwmutex.trace("_sys_lwmutex_unlock(lwmutex_id=0x%x)", lwmutex_id);
@ -192,7 +198,8 @@ error_code _sys_lwmutex_unlock(u32 lwmutex_id)
if (mutex.ret)
{
mutex.ret->set_signal();
mutex->awake(*mutex.ret);
ppu.check_state();
}
return CELL_OK;

View file

@ -78,4 +78,4 @@ error_code _sys_lwmutex_create(vm::ps3::ptr<u32> lwmutex_id, u32 protocol, vm::p
error_code _sys_lwmutex_destroy(u32 lwmutex_id);
error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout);
error_code _sys_lwmutex_trylock(u32 lwmutex_id);
error_code _sys_lwmutex_unlock(u32 lwmutex_id);
error_code _sys_lwmutex_unlock(ppu_thread& ppu, u32 lwmutex_id);

View file

@ -28,8 +28,9 @@ error_code sys_mutex_create(vm::ptr<u32> mutex_id, vm::ptr<sys_mutex_attribute_t
{
case SYS_SYNC_FIFO: break;
case SYS_SYNC_PRIORITY: break;
case SYS_SYNC_PRIORITY_INHERIT: break;
case SYS_SYNC_PRIORITY_INHERIT:
sys_mutex.todo("sys_mutex_create(): SYS_SYNC_PRIORITY_INHERIT");
break;
default:
{
sys_mutex.error("sys_mutex_create(): unknown protocol (0x%x)", protocol);
@ -105,7 +106,23 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
const auto mutex = idm::get<lv2_obj, lv2_mutex>(mutex_id, [&](lv2_mutex& mutex)
{
return mutex.lock(ppu, ppu.id);
CellError result = mutex.try_lock(ppu.id);
if (result == CELL_EBUSY)
{
semaphore_lock lock(mutex.mutex);
if (mutex.try_own(ppu, ppu.id))
{
result = {};
}
else
{
mutex.sleep(ppu, start_time, timeout);
}
}
return result;
});
if (!mutex)
@ -125,7 +142,7 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
return CELL_OK;
}
// SLEEP
ppu.gpr[3] = CELL_OK;
while (!ppu.state.test_and_reset(cpu_flag::signal))
{
@ -143,7 +160,8 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
continue;
}
return not_an_error(CELL_ETIMEDOUT);
ppu.gpr[3] = CELL_ETIMEDOUT;
break;
}
thread_ctrl::wait_for(timeout - passed);
@ -154,7 +172,8 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
}
}
return CELL_OK;
ppu.check_state();
return not_an_error(ppu.gpr[3]);
}
error_code sys_mutex_trylock(ppu_thread& ppu, u32 mutex_id)

View file

@ -101,23 +101,6 @@ struct lv2_mutex final : lv2_obj
return true;
}
CellError lock(cpu_thread& cpu, u32 id)
{
CellError result = try_lock(id);
if (result == CELL_EBUSY)
{
semaphore_lock lock(mutex);
if (try_own(cpu, id))
{
return {};
}
}
return result;
}
CellError try_unlock(u32 id)
{
const u32 value = owner;
@ -151,7 +134,7 @@ struct lv2_mutex final : lv2_obj
{
owner = cpu->id << 1 | !sq.empty();
cpu->set_signal();
awake(*cpu);
}
else
{

View file

@ -7,23 +7,51 @@
#include "Emu/Cell/PPUThread.h"
#include "sys_ppu_thread.h"
#include <thread>
namespace vm { using namespace ps3; }
logs::channel sys_ppu_thread("sys_ppu_thread", logs::level::notice);
void _sys_ppu_thread_exit(ppu_thread& ppu, u64 errorcode)
{
sys_ppu_thread.warning("_sys_ppu_thread_exit(errorcode=0x%llx)", errorcode);
sys_ppu_thread.trace("_sys_ppu_thread_exit(errorcode=0x%llx)", errorcode);
ppu.state += cpu_flag::exit;
// Delete detached thread
if (ppu.joiner == -1)
// Get joiner ID
const u32 jid = ppu.joiner.fetch_op([](u32& value)
{
if (value == 0)
{
// Joinable, not joined
value = -3;
}
else if (value != -1)
{
// Joinable, joined
value = -2;
}
// Detached otherwise
});
if (jid == -1)
{
// Delete detached thread and unqueue
idm::remove<ppu_thread>(ppu.id);
}
else if (jid != 0)
{
writer_lock lock(id_manager::g_mutex);
// Schedule joiner and unqueue
lv2_obj::awake(*idm::check_unlocked<ppu_thread>(jid), -2);
}
// Unqueue
lv2_obj::sleep(ppu, -1);
// Remove suspend state (TODO)
ppu.state -= cpu_flag::suspend;
// Throw if this syscall was not called directly by the SC instruction (hack)
if (ppu.lr == 0 || ppu.gpr[11] != 41)
@ -32,41 +60,67 @@ void _sys_ppu_thread_exit(ppu_thread& ppu, u64 errorcode)
}
}
void sys_ppu_thread_yield()
void sys_ppu_thread_yield(ppu_thread& ppu)
{
sys_ppu_thread.trace("sys_ppu_thread_yield()");
std::this_thread::yield();
lv2_obj::awake(ppu, -4);
ppu.check_state();
}
error_code sys_ppu_thread_join(ppu_thread& ppu, u32 thread_id, vm::ptr<u64> vptr)
{
sys_ppu_thread.warning("sys_ppu_thread_join(thread_id=0x%x, vptr=*0x%x)", thread_id, vptr);
sys_ppu_thread.trace("sys_ppu_thread_join(thread_id=0x%x, vptr=*0x%x)", thread_id, vptr);
const auto thread = idm::get<ppu_thread>(thread_id);
const auto thread = idm::get<ppu_thread>(thread_id, [&](ppu_thread& thread) -> CellError
{
CellError result = thread.joiner.atomic_op([&](u32& value) -> CellError
{
if (value == -3)
{
value = -2;
return CELL_EBUSY;
}
if (value == -2)
{
return CELL_ESRCH;
}
if (value)
{
return CELL_EINVAL;
}
// TODO: check precedence?
if (&ppu == &thread)
{
return CELL_EDEADLK;
}
value = ppu.id;
return {};
});
if (!result)
{
lv2_obj::sleep(ppu, -1);
}
return result;
});
if (!thread)
{
return CELL_ESRCH;
}
// TODO: this is race condition if EDEADLK check doesn't actually take the precedence
if (thread->joiner)
if (thread.ret && thread.ret != CELL_EBUSY)
{
return CELL_EINVAL;
return thread.ret;
}
if (&ppu == thread.get())
{
return CELL_EDEADLK;
}
if (!thread->joiner.compare_and_swap_test(0, ppu.id))
{
return CELL_EINVAL;
}
// Actually join
// Wait for cleanup
thread->join();
// Get the exit status from the register
@ -83,33 +137,53 @@ error_code sys_ppu_thread_join(ppu_thread& ppu, u32 thread_id, vm::ptr<u64> vptr
error_code sys_ppu_thread_detach(u32 thread_id)
{
sys_ppu_thread.warning("sys_ppu_thread_detach(thread_id=0x%x)", thread_id);
sys_ppu_thread.trace("sys_ppu_thread_detach(thread_id=0x%x)", thread_id);
const auto thread = idm::get<ppu_thread>(thread_id);
const auto thread = idm::check<ppu_thread>(thread_id, [&](ppu_thread& thread) -> CellError
{
return thread.joiner.atomic_op([&](u32& value) -> CellError
{
if (value == -3)
{
value = -2;
return CELL_EAGAIN;
}
if (value == -2)
{
return CELL_ESRCH;
}
if (value == -1)
{
return CELL_EINVAL;
}
if (value)
{
return CELL_EBUSY;
}
value = -1;
return {};
});
});
if (!thread)
{
return CELL_ESRCH;
}
u32 joiner = thread->joiner;
if (!joiner)
if (thread.ret && thread.ret != CELL_EAGAIN)
{
// Detach thread by assigning -1
joiner = thread->joiner.compare_and_swap(0, -1);
return thread.ret;
}
if (joiner == -1)
if (thread.ret == CELL_EAGAIN)
{
return CELL_EINVAL;
idm::remove<ppu_thread>(thread_id);
}
if (joiner)
{
return CELL_EBUSY;
}
return CELL_OK;
}
@ -120,23 +194,32 @@ void sys_ppu_thread_get_join_state(ppu_thread& ppu, vm::ptr<s32> isjoinable)
*isjoinable = ppu.joiner != -1;
}
error_code sys_ppu_thread_set_priority(u32 thread_id, s32 prio)
error_code sys_ppu_thread_set_priority(ppu_thread& ppu, u32 thread_id, s32 prio)
{
sys_ppu_thread.trace("sys_ppu_thread_set_priority(thread_id=0x%x, prio=%d)", thread_id, prio);
const auto thread = idm::get<ppu_thread>(thread_id);
if (!thread)
{
return CELL_ESRCH;
}
if (prio < 0 || prio > 3071)
{
return CELL_EINVAL;
}
thread->prio = prio;
const auto thread = idm::check<ppu_thread>(thread_id, [&](ppu_thread& thread)
{
if (thread.prio != prio && thread.prio.exchange(prio) != prio)
{
lv2_obj::awake(thread, prio);
}
});
if (!thread)
{
return CELL_ESRCH;
}
if (&ppu == thread)
{
ppu.check_state();
}
return CELL_OK;
}
@ -145,15 +228,16 @@ error_code sys_ppu_thread_get_priority(u32 thread_id, vm::ptr<s32> priop)
{
sys_ppu_thread.trace("sys_ppu_thread_get_priority(thread_id=0x%x, priop=*0x%x)", thread_id, priop);
const auto thread = idm::get<ppu_thread>(thread_id);
const auto thread = idm::check<ppu_thread>(thread_id, [&](ppu_thread& thread)
{
*priop = thread.prio;
});
if (!thread)
{
return CELL_ESRCH;
}
*priop = thread->prio;
return CELL_OK;
}
@ -248,20 +332,31 @@ error_code _sys_ppu_thread_create(vm::ptr<u64> thread_id, vm::ptr<ppu_thread_par
return CELL_OK;
}
error_code sys_ppu_thread_start(u32 thread_id)
error_code sys_ppu_thread_start(ppu_thread& ppu, u32 thread_id)
{
sys_ppu_thread.notice("sys_ppu_thread_start(thread_id=0x%x)", thread_id);
sys_ppu_thread.trace("sys_ppu_thread_start(thread_id=0x%x)", thread_id);
const auto thread = idm::get<ppu_thread>(thread_id);
const auto thread = idm::get<ppu_thread>(thread_id, [&](ppu_thread& thread)
{
lv2_obj::awake(thread, -2);
});
if (!thread)
{
return CELL_ESRCH;
}
// TODO
thread->run();
if (!thread->state.test_and_reset(cpu_flag::stop))
{
// TODO: what happens there?
return CELL_EPERM;
}
else
{
thread->notify();
}
ppu.check_state();
return CELL_OK;
}

View file

@ -44,15 +44,15 @@ enum : u32
// Syscalls
void _sys_ppu_thread_exit(ppu_thread& ppu, u64 errorcode);
void sys_ppu_thread_yield();
void sys_ppu_thread_yield(ppu_thread& ppu);
error_code sys_ppu_thread_join(ppu_thread& ppu, u32 thread_id, vm::ps3::ptr<u64> vptr);
error_code sys_ppu_thread_detach(u32 thread_id);
void sys_ppu_thread_get_join_state(ppu_thread& ppu, vm::ps3::ptr<s32> isjoinable);
error_code sys_ppu_thread_set_priority(u32 thread_id, s32 prio);
error_code sys_ppu_thread_set_priority(ppu_thread& ppu, u32 thread_id, s32 prio);
error_code sys_ppu_thread_get_priority(u32 thread_id, vm::ps3::ptr<s32> priop);
error_code sys_ppu_thread_get_stack_information(ppu_thread& ppu, vm::ps3::ptr<sys_ppu_thread_stack_t> sp);
error_code sys_ppu_thread_stop(u32 thread_id);
error_code sys_ppu_thread_restart(u32 thread_id);
error_code _sys_ppu_thread_create(vm::ps3::ptr<u64> thread_id, vm::ps3::ptr<ppu_thread_param_t> param, u64 arg, u64 arg4, s32 prio, u32 stacksize, u64 flags, vm::ps3::cptr<char> threadname);
error_code sys_ppu_thread_start(u32 thread_id);
error_code sys_ppu_thread_start(ppu_thread& ppu, u32 thread_id);
error_code sys_ppu_thread_rename(u32 thread_id, vm::ps3::cptr<char> name);

View file

@ -24,6 +24,9 @@ error_code sys_rwlock_create(vm::ptr<u32> rw_lock_id, vm::ptr<sys_rwlock_attribu
const u32 protocol = attr->protocol;
if (protocol == SYS_SYNC_PRIORITY_INHERIT)
sys_rwlock.todo("sys_rwlock_create(): SYS_SYNC_PRIORITY_INHERIT");
if (protocol != SYS_SYNC_FIFO && protocol != SYS_SYNC_PRIORITY && protocol != SYS_SYNC_PRIORITY_INHERIT)
{
sys_rwlock.error("sys_rwlock_create(): unknown protocol (0x%x)", protocol);
@ -107,6 +110,7 @@ error_code sys_rwlock_rlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
if (_old > 0 || _old & 1)
{
rwlock.rq.emplace_back(&ppu);
rwlock.sleep(ppu, start_time, timeout);
return false;
}
@ -123,7 +127,7 @@ error_code sys_rwlock_rlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
return CELL_OK;
}
// SLEEP
ppu.gpr[3] = CELL_OK;
while (!ppu.state.test_and_reset(cpu_flag::signal))
{
@ -141,7 +145,8 @@ error_code sys_rwlock_rlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
continue;
}
return not_an_error(CELL_ETIMEDOUT);
ppu.gpr[3] = CELL_ETIMEDOUT;
break;
}
thread_ctrl::wait_for(timeout - passed);
@ -152,7 +157,8 @@ error_code sys_rwlock_rlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
}
}
return CELL_OK;
ppu.check_state();
return not_an_error(ppu.gpr[3]);
}
error_code sys_rwlock_tryrlock(u32 rw_lock_id)
@ -187,7 +193,7 @@ error_code sys_rwlock_tryrlock(u32 rw_lock_id)
return CELL_OK;
}
error_code sys_rwlock_runlock(u32 rw_lock_id)
error_code sys_rwlock_runlock(ppu_thread& ppu, u32 rw_lock_id)
{
sys_rwlock.trace("sys_rwlock_runlock(rw_lock_id=0x%x)", rw_lock_id);
@ -239,7 +245,7 @@ error_code sys_rwlock_runlock(u32 rw_lock_id)
{
rwlock->owner = cpu->id << 1 | !rwlock->wq.empty();
cpu->set_signal();
rwlock->awake(*cpu);
}
else
{
@ -250,6 +256,7 @@ error_code sys_rwlock_runlock(u32 rw_lock_id)
}
}
ppu.check_state();
return CELL_OK;
}
@ -292,6 +299,7 @@ error_code sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
if (_old != 0)
{
rwlock.wq.emplace_back(&ppu);
rwlock.sleep(ppu, start_time, timeout);
return false;
}
@ -313,7 +321,7 @@ error_code sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
return CELL_EDEADLK;
}
// SLEEP
ppu.gpr[3] = CELL_OK;
while (!ppu.state.test_and_reset(cpu_flag::signal))
{
@ -338,13 +346,14 @@ error_code sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
while (auto cpu = rwlock->schedule<ppu_thread>(rwlock->rq, SYS_SYNC_PRIORITY))
{
cpu->set_signal();
rwlock->awake(*cpu);
}
rwlock->owner &= ~1;
}
return not_an_error(CELL_ETIMEDOUT);
ppu.gpr[3] = CELL_ETIMEDOUT;
break;
}
thread_ctrl::wait_for(timeout - passed);
@ -355,7 +364,8 @@ error_code sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
}
}
return CELL_OK;
ppu.check_state();
return not_an_error(ppu.gpr[3]);
}
error_code sys_rwlock_trywlock(ppu_thread& ppu, u32 rw_lock_id)
@ -418,7 +428,7 @@ error_code sys_rwlock_wunlock(ppu_thread& ppu, u32 rw_lock_id)
{
rwlock->owner = cpu->id << 1 | !rwlock->wq.empty();
cpu->set_signal();
rwlock->awake(*cpu);
}
else if (auto readers = rwlock->rq.size())
{
@ -426,7 +436,7 @@ error_code sys_rwlock_wunlock(ppu_thread& ppu, u32 rw_lock_id)
while (auto cpu = rwlock->schedule<ppu_thread>(rwlock->rq, SYS_SYNC_PRIORITY))
{
cpu->set_signal();
rwlock->awake(*cpu);
}
rwlock->owner &= ~1;
@ -437,5 +447,10 @@ error_code sys_rwlock_wunlock(ppu_thread& ppu, u32 rw_lock_id)
}
}
if (rwlock.ret & 1)
{
ppu.check_state();
}
return CELL_OK;
}

View file

@ -51,7 +51,7 @@ error_code sys_rwlock_create(vm::ps3::ptr<u32> rw_lock_id, vm::ps3::ptr<sys_rwlo
error_code sys_rwlock_destroy(u32 rw_lock_id);
error_code sys_rwlock_rlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout);
error_code sys_rwlock_tryrlock(u32 rw_lock_id);
error_code sys_rwlock_runlock(u32 rw_lock_id);
error_code sys_rwlock_runlock(ppu_thread& ppu, u32 rw_lock_id);
error_code sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout);
error_code sys_rwlock_trywlock(ppu_thread& ppu, u32 rw_lock_id);
error_code sys_rwlock_wunlock(ppu_thread& ppu, u32 rw_lock_id);

View file

@ -30,6 +30,9 @@ error_code sys_semaphore_create(vm::ptr<u32> sem_id, vm::ptr<sys_semaphore_attri
const u32 protocol = attr->protocol;
if (protocol == SYS_SYNC_PRIORITY_INHERIT)
sys_semaphore.todo("sys_semaphore_create(): SYS_SYNC_PRIORITY_INHERIT");
if (protocol != SYS_SYNC_FIFO && protocol != SYS_SYNC_PRIORITY && protocol != SYS_SYNC_PRIORITY_INHERIT)
{
sys_semaphore.error("sys_semaphore_create(): unknown protocol (0x%x)", protocol);
@ -101,6 +104,7 @@ error_code sys_semaphore_wait(ppu_thread& ppu, u32 sem_id, u64 timeout)
if (sema.val-- <= 0)
{
sema.sq.emplace_back(&ppu);
sema.sleep(ppu, start_time, timeout);
return false;
}
@ -117,7 +121,7 @@ error_code sys_semaphore_wait(ppu_thread& ppu, u32 sem_id, u64 timeout)
return CELL_OK;
}
// SLEEP
ppu.gpr[3] = CELL_OK;
while (!ppu.state.test_and_reset(cpu_flag::signal))
{
@ -144,7 +148,8 @@ error_code sys_semaphore_wait(ppu_thread& ppu, u32 sem_id, u64 timeout)
}
verify(HERE), sem->unqueue(sem->sq, &ppu);
return not_an_error(CELL_ETIMEDOUT);
ppu.gpr[3] = CELL_ETIMEDOUT;
break;
}
thread_ctrl::wait_for(timeout - passed);
@ -155,7 +160,8 @@ error_code sys_semaphore_wait(ppu_thread& ppu, u32 sem_id, u64 timeout)
}
}
return CELL_OK;
ppu.check_state();
return not_an_error(ppu.gpr[3]);
}
error_code sys_semaphore_trywait(u32 sem_id)
@ -190,7 +196,7 @@ error_code sys_semaphore_trywait(u32 sem_id)
return CELL_OK;
}
error_code sys_semaphore_post(u32 sem_id, s32 count)
error_code sys_semaphore_post(ppu_thread& ppu, u32 sem_id, s32 count)
{
sys_semaphore.trace("sys_semaphore_post(sem_id=0x%x, count=%d)", sem_id, count);
@ -245,10 +251,11 @@ error_code sys_semaphore_post(u32 sem_id, s32 count)
{
const auto cpu = verify(HERE, sem->schedule<ppu_thread>(sem->sq, sem->protocol));
cpu->set_signal();
sem->awake(*cpu);
}
}
ppu.check_state();
return CELL_OK;
}

View file

@ -53,5 +53,5 @@ error_code sys_semaphore_create(vm::ps3::ptr<u32> sem_id, vm::ps3::ptr<sys_semap
error_code sys_semaphore_destroy(u32 sem_id);
error_code sys_semaphore_wait(ppu_thread& ppu, u32 sem_id, u64 timeout);
error_code sys_semaphore_trywait(u32 sem_id);
error_code sys_semaphore_post(u32 sem_id, s32 count);
error_code sys_semaphore_post(ppu_thread& ppu, u32 sem_id, s32 count);
error_code sys_semaphore_get_value(u32 sem_id, vm::ps3::ptr<s32> count);

View file

@ -6,6 +6,7 @@
#include "Loader/ELF.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/RawSPUThread.h"
#include "sys_interrupt.h"
#include "sys_event.h"
@ -470,7 +471,7 @@ error_code sys_spu_thread_group_terminate(u32 id, s32 value)
return CELL_OK;
}
error_code sys_spu_thread_group_join(u32 id, vm::ptr<u32> cause, vm::ptr<u32> status)
error_code sys_spu_thread_group_join(ppu_thread& ppu, u32 id, vm::ptr<u32> cause, vm::ptr<u32> status)
{
sys_spu.warning("sys_spu_thread_group_join(id=0x%x, cause=*0x%x, status=*0x%x)", id, cause, status);
@ -494,6 +495,8 @@ error_code sys_spu_thread_group_join(u32 id, vm::ptr<u32> cause, vm::ptr<u32> st
return CELL_EBUSY;
}
lv2_obj::sleep(ppu, -1);
while ((group->join_state & ~SPU_TGJSF_IS_JOINING) == 0)
{
bool stopped = true;

View file

@ -221,7 +221,7 @@ error_code sys_spu_thread_group_suspend(u32 id);
error_code sys_spu_thread_group_resume(u32 id);
error_code sys_spu_thread_group_yield(u32 id);
error_code sys_spu_thread_group_terminate(u32 id, s32 value);
error_code sys_spu_thread_group_join(u32 id, vm::ps3::ptr<u32> cause, vm::ps3::ptr<u32> status);
error_code sys_spu_thread_group_join(ppu_thread&, u32 id, vm::ps3::ptr<u32> cause, vm::ps3::ptr<u32> status);
error_code sys_spu_thread_group_connect_event(u32 id, u32 eq, u32 et);
error_code sys_spu_thread_group_disconnect_event(u32 id, u32 et);
error_code sys_spu_thread_group_connect_event_all_threads(u32 id, u32 eq_id, u64 req, vm::ps3::ptr<u8> spup);

View file

@ -105,4 +105,39 @@ struct lv2_obj
queue.erase(it);
return res;
}
// Remove the current thread from the scheduling queue, register timeout
static void sleep(named_thread&, u64 wait_until);
template <typename T>
static void sleep(T& thread, u64 start_time, u64 timeout)
{
sleep(thread, timeout ? start_time + timeout : -1);
}
// Schedule the thread
static void awake(class cpu_thread&, u32 prio);
template <typename T>
static void awake(T& cpu)
{
awake(cpu, -1);
}
static void cleanup();
private:
// Scheduler mutex
static semaphore<> g_mutex;
// Scheduler queue for active PPU threads
static std::deque<class ppu_thread*> g_ppu;
// Waiting for the response from
static std::deque<class cpu_thread*> g_pending;
// Scheduler queue for timeouts (wait until -> thread)
static std::deque<std::pair<u64, named_thread*>> g_waiting;
static void schedule_all();
};

View file

@ -19,7 +19,7 @@ extern u64 get_system_time();
void lv2_timer::on_task()
{
while (true)
while (!Emu.IsStopped())
{
const u32 _state = state;
@ -50,11 +50,12 @@ void lv2_timer::on_task()
}
// TODO: use single global dedicated thread for busy waiting, no timer threads
lv2_obj::sleep(*this, next);
thread_ctrl::wait_for(next - _now);
}
else if (_state == SYS_TIMER_STATE_STOP)
{
thread_ctrl::wait();
thread_ctrl::wait_for(10000);
}
else
{
@ -143,7 +144,7 @@ error_code _sys_timer_start(u32 timer_id, u64 base_time, u64 period)
if (!period && start_time >= base_time)
{
// Invalid oneshot (TODO: what will happen if both args are 0?)
return CELL_ETIMEDOUT;
return not_an_error(CELL_ETIMEDOUT);
}
if (period && period < 100)
@ -292,11 +293,10 @@ error_code sys_timer_usleep(ppu_thread& ppu, u64 sleep_time)
u64 start = ppu.gpr[10] = get_system_time();
u64 passed = 0;
// SLEEP
lv2_obj::sleep(ppu, start, std::max<u64>(1, sleep_time));
while (sleep_time >= passed)
{
// TODO: use single global dedicated thread for busy waiting
thread_ctrl::wait_for(std::max<u64>(1, sleep_time - passed));
passed = get_system_time() - start;
}

View file

@ -139,6 +139,8 @@ public:
const std::string m_name;
atomic_t<void*> owner{};
const char* last_function = nullptr;
void write_pc(u32 value, u32 size)

View file

@ -375,6 +375,7 @@ namespace rsx
({
{ ppu_cmd::set_args, 1 }, u64{1},
{ ppu_cmd::lle_call, vblank_handler },
{ ppu_cmd::sleep, 0 }
});
intr_thread->notify();

View file

@ -779,6 +779,7 @@ namespace rsx
({
{ ppu_cmd::set_args, 1 }, u64{1},
{ ppu_cmd::lle_call, rsx->flip_handler },
{ ppu_cmd::sleep, 0 }
});
rsx->intr_thread->notify();
@ -793,6 +794,7 @@ namespace rsx
({
{ ppu_cmd::set_args, 1 }, u64{arg},
{ ppu_cmd::lle_call, rsx->user_handler },
{ ppu_cmd::sleep, 0 }
});
rsx->intr_thread->notify();

View file

@ -466,6 +466,7 @@ void Emulator::Stop()
LOG_NOTICE(GENERAL, "All threads stopped...");
lv2_obj::cleanup();
idm::clear();
fxm::clear();