From 5d4e87373f7b2b6a1f5ebb02515748de2be7ad25 Mon Sep 17 00:00:00 2001
From: Eladash
Date: Wed, 24 May 2023 20:47:29 +0300
Subject: [PATCH] LV2: Make _sys_lwcond_destroy wait for lwmutex lock

---
 rpcs3/Emu/Cell/lv2/sys_lwcond.cpp | 85 ++++++++++++++++++++++++++-----
 rpcs3/Emu/Cell/lv2/sys_lwcond.h   |  2 +
 2 files changed, 75 insertions(+), 12 deletions(-)

diff --git a/rpcs3/Emu/Cell/lv2/sys_lwcond.cpp b/rpcs3/Emu/Cell/lv2/sys_lwcond.cpp
index 4bab5ba2d6..a6b0228253 100644
--- a/rpcs3/Emu/Cell/lv2/sys_lwcond.cpp
+++ b/rpcs3/Emu/Cell/lv2/sys_lwcond.cpp
@@ -64,24 +64,77 @@ error_code _sys_lwcond_destroy(ppu_thread& ppu, u32 lwcond_id)
 
 	sys_lwcond.warning("_sys_lwcond_destroy(lwcond_id=0x%x)", lwcond_id);
 
-	const auto cond = idm::withdraw<lv2_obj, lv2_lwcond>(lwcond_id, [&](lv2_lwcond& cond) -> CellError
+	std::shared_ptr<lv2_lwcond> _cond;
+
+	while (true)
 	{
-		if (atomic_storage<ppu_thread*>::load(cond.sq))
+		s32 old_val = 0;
+
+		auto [ptr, ret] = idm::withdraw<lv2_obj, lv2_lwcond>(lwcond_id, [&](lv2_lwcond& cond) -> CellError
 		{
-			return CELL_EBUSY;
+			// Ignore check on first iteration
+			if (_cond && std::addressof(cond) != _cond.get())
+			{
+				// Other thread has destroyed the lwcond earlier
+				return CELL_ESRCH;
+			}
+
+			std::lock_guard lock(cond.mutex);
+
+			if (atomic_storage<ppu_thread*>::load(cond.sq))
+			{
+				return CELL_EBUSY;
+			}
+
+			old_val = cond.lwmutex_waiters.or_fetch(smin);
+
+			if (old_val != smin)
+			{
+				// De-schedule if waiters were found
+				lv2_obj::sleep(ppu);
+
+				// Repeat loop: there are lwmutex waiters inside _sys_lwcond_queue_wait
+				return CELL_EAGAIN;
+			}
+
+			return {};
+		});
+
+		if (!ptr)
+		{
+			return CELL_ESRCH;
 		}
 
-		return {};
-	});
+		if (ret)
+		{
+			if (ret != CELL_EAGAIN)
+			{
+				return ret;
+			}
+		}
+		else
+		{
+			break;
+		}
 
-	if (!cond)
-	{
-		return CELL_ESRCH;
-	}
+		_cond = std::move(ptr);
 
-	if (cond.ret)
-	{
-		return cond.ret;
+		// Wait for all lwcond waiters to quit
+		while (old_val + 0u > 1u << 31)
+		{
+			thread_ctrl::wait_on(_cond->lwmutex_waiters, old_val);
+
+			if (ppu.is_stopped())
+			{
+				ppu.state += cpu_flag::again;
+				return {};
+			}
+
+			old_val = _cond->lwmutex_waiters;
+		}
+
+		// Wake up from sleep
+		ppu.check_state();
 	}
 
 	return CELL_OK;
@@ -341,6 +394,8 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
 
 	std::lock_guard lock(cond.mutex);
 
+	cond.lwmutex_waiters++;
+
 	const bool mutex_sleep = sstate.try_read<bool>().second;
 	sstate.clear();
 
@@ -510,6 +565,12 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
 		mutex->lwcond_waiters.notify_all();
 	}
 
+	if (--cond->lwmutex_waiters == smin)
+	{
+		// Notify the thread destroying lwcond on last waiter
+		cond->lwmutex_waiters.notify_all();
+	}
+
 	// Return cause
 	return not_an_error(ppu.gpr[3]);
 }
diff --git a/rpcs3/Emu/Cell/lv2/sys_lwcond.h b/rpcs3/Emu/Cell/lv2/sys_lwcond.h
index 58246f9b28..1a841b08c3 100644
--- a/rpcs3/Emu/Cell/lv2/sys_lwcond.h
+++ b/rpcs3/Emu/Cell/lv2/sys_lwcond.h
@@ -33,6 +33,8 @@ struct lv2_lwcond final : lv2_obj
 	shared_mutex mutex;
 	ppu_thread* sq{};
 
+	atomic_t<s32> lwmutex_waiters = 0;
+
 	lv2_lwcond(u64 name, u32 lwid, u32 protocol, vm::ptr<sys_lwcond_t> control) noexcept
 		: name(std::bit_cast<be_t<u64>>(name))
 		, lwid(lwid)
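
Note (not part of the patch): the change above adds a wait-for-waiters handshake. _sys_lwcond_queue_wait counts itself in lwmutex_waiters, and _sys_lwcond_destroy sets the counter's sign bit (or_fetch(smin)) and blocks until the count drains to zero, at which point the last waiter notifies it. Below is a minimal standalone sketch of that handshake written against C++20 std::atomic rather than RPCS3's atomic_t / thread_ctrl helpers; the names waiters, destroy_flag, waiter_enter, waiter_exit and destroy_wait are illustrative only and do not exist in the codebase.

	#include <atomic>
	#include <climits>

	// Sign bit doubles as a "destroy pending" flag, like smin in the patch
	constexpr int destroy_flag = INT_MIN;

	std::atomic<int> waiters{0};

	// Waiter side (role of _sys_lwcond_queue_wait)
	void waiter_enter()
	{
		waiters.fetch_add(1);
	}

	void waiter_exit()
	{
		// Last waiter out while destruction is pending: wake the destroyer
		if (waiters.fetch_sub(1) - 1 == destroy_flag)
		{
			waiters.notify_all();
		}
	}

	// Destroyer side (role of _sys_lwcond_destroy): returns only once no waiters remain
	void destroy_wait()
	{
		int val = waiters.fetch_or(destroy_flag) | destroy_flag;

		while (val != destroy_flag)
		{
			waiters.wait(val); // blocks while the value still equals val
			val = waiters.load();
		}
	}

As in the patch, piggybacking the flag on the counter keeps both "how many waiters" and "destruction requested" in one atomic word, so the last waiter can detect with a single decrement that it must notify the destroying thread.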