sys_cond fixes (#6584)
* sys_cond fixes:
  - sys_cond_wait is now signaled atomically (regression fix).
  - Fix a corner case between sys_cond_wait and the EBUSY check in sys_cond_destroy (the waiter count was inaccurate if the thread was not the owner of the mutex).
  - Add a note (TODO) about the EBUSY corner case.
* Fix an inconsistency between sys_cond_destroy and the ETIMEDOUT event in sys_cond_wait regarding the waiter count: the count is now properly tied to the mutex owner's actions after ETIMEDOUT in sys_cond_wait.
This commit is contained in:
parent 286d62d09c
commit 08e674aa28

1 changed file with 49 additions and 46 deletions
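The common thread through the hunks below is that the waiter count is now only modified while cond.mutex->mutex is held, and sys_cond_destroy takes that same lock before its EBUSY check, so the check can never observe a half-updated count. The following standalone sketch illustrates the invariant in standard C++; LwCond, wait_for_ms, and try_destroy are illustrative names, not RPCS3 API:

// Minimal standalone sketch (standard C++, not RPCS3 code): the waiter
// count is only ever touched while the associated mutex is held, so a
// "destroy" check can never race with a signal or a timed-out waiter.
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

struct LwCond // illustrative analogue of lv2_cond, not the RPCS3 type
{
	std::mutex mtx;              // plays the role of cond.mutex->mutex
	std::condition_variable cv;
	int waiters = 0;             // plays the role of cond.waiters
	bool signaled = false;

	bool wait_for_ms(int ms)
	{
		std::unique_lock lk(mtx);
		++waiters;               // "promise" to be a waiter, under the lock
		bool ok = cv.wait_for(lk, std::chrono::milliseconds(ms),
		                      [&] { return signaled; });
		--waiters;               // bookkeeping also happens under the lock
		return ok;               // false is the ETIMEDOUT analogue
	}

	void signal()
	{
		std::lock_guard lk(mtx); // signal atomically w.r.t. the count
		signaled = true;
		cv.notify_one();
	}

	bool try_destroy()           // EBUSY analogue of sys_cond_destroy
	{
		std::lock_guard lk(mtx);
		return waiters == 0;     // only succeeds with no pending waiters
	}
};

int main()
{
	LwCond c;
	std::thread t([&] { std::printf("waiter signaled: %d\n", c.wait_for_ms(1000)); });
	std::this_thread::sleep_for(std::chrono::milliseconds(50));
	c.signal();
	t.join();
	std::printf("destroy ok: %d\n", c.try_destroy());
}

Under this discipline a timed-out waiter decrements the count itself, under the lock, which is exactly what the last hunk adds to the ETIMEDOUT path.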
--- a/rpcs3/Emu/Cell/lv2/sys_cond.cpp
+++ b/rpcs3/Emu/Cell/lv2/sys_cond.cpp
@@ -50,6 +50,8 @@ error_code sys_cond_destroy(ppu_thread& ppu, u32 cond_id)
 	const auto cond = idm::withdraw<lv2_obj, lv2_cond>(cond_id, [&](lv2_cond& cond) -> CellError
 	{
+		std::lock_guard lock(cond.mutex->mutex);
+
 		if (cond.waiters)
 		{
 			return CELL_EBUSY;
@@ -84,10 +86,16 @@ error_code sys_cond_signal(ppu_thread& ppu, u32 cond_id)
 			std::lock_guard lock(cond.mutex->mutex);
 
 			if (const auto cpu = cond.schedule<ppu_thread>(cond.sq, cond.mutex->protocol))
+			{
+				// TODO: Is EBUSY returned after reqeueing, on sys_cond_destroy?
+				cond.waiters--;
+
+				if (cond.mutex->try_own(*cpu, cpu->id))
 				{
 					cond.awake(cpu);
 				}
 			}
+		}
 	});
 
 	if (!cond)
@@ -106,21 +114,24 @@ error_code sys_cond_signal_all(ppu_thread& ppu, u32 cond_id)
 
 	const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [](lv2_cond& cond)
 	{
-		uint count = 0;
-
 		if (cond.waiters)
 		{
 			std::lock_guard lock(cond.mutex->mutex);
 
+			cpu_thread* result = nullptr;
+			cond.waiters -= ::size32(cond.sq);
+
 			while (const auto cpu = cond.schedule<ppu_thread>(cond.sq, cond.mutex->protocol))
 			{
-				lv2_obj::append(cpu);
-				count++;
+				if (cond.mutex->try_own(*cpu, cpu->id))
+				{
+					verify(HERE), !std::exchange(result, cpu);
+				}
 			}
 
-			if (count)
+			if (result)
 			{
-				lv2_obj::awake_all();
+				lv2_obj::awake(result);
 			}
 		}
 	});
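The rewritten sys_cond_signal_all above no longer schedules every waiter (lv2_obj::append plus awake_all); each waiter instead attempts try_own, which can succeed for at most one of them because the mutex has a single owner, and only that thread is awoken while the rest remain queued on the mutex. The verify(HERE), !std::exchange(result, cpu) line asserts exactly that "at most one" property. A hedged sketch of the pattern, where Waiter and the boolean mutex_free are hypothetical stand-ins for the lv2 types:

// Illustrative sketch of handing mutex ownership to at most one waiter
// (hypothetical types; not the RPCS3 scheduler API).
#include <cassert>
#include <utility>
#include <vector>

struct Waiter { int id; };

int main()
{
	std::vector<Waiter> queue{{1}, {2}, {3}}; // pretend sleep queue
	bool mutex_free = true;                   // the mutex has no owner yet
	Waiter* result = nullptr;

	for (Waiter& w : queue)
	{
		// try_own analogue: only succeeds while the mutex is still free
		if (std::exchange(mutex_free, false))
		{
			// mirrors: verify(HERE), !std::exchange(result, cpu);
			Waiter* prev = std::exchange(result, &w);
			assert(prev == nullptr); // at most one waiter owns the mutex
			(void)prev;
		}
		// everyone else stays queued on the mutex, not on the scheduler
	}

	assert(result && result->id == 1); // only the first waiter is awoken
}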
@@ -155,7 +166,14 @@ error_code sys_cond_signal_to(ppu_thread& ppu, u32 cond_id, u32 thread_id)
 		if (cpu->id == thread_id)
 		{
 			verify(HERE), cond.unqueue(cond.sq, cpu);
+
+			cond.waiters--;
+
+			if (cond.mutex->try_own(*cpu, cpu->id))
+			{
 				cond.awake(cpu);
+			}
+
 			return 1;
 		}
 	}
@@ -184,9 +202,12 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
 	sys_cond.trace("sys_cond_wait(cond_id=0x%x, timeout=%lld)", cond_id, timeout);
 
 	const auto cond = idm::get<lv2_obj, lv2_cond>(cond_id, [&](lv2_cond& cond)
 	{
+		if (cond.mutex->owner >> 1 == ppu.id)
+		{
 			// Add a "promise" to add a waiter
 			cond.waiters++;
+		}
 
 		// Save the recursive value
 		return cond.mutex->lock_count.load();
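This hunk and the ownership checks later both rely on the owner >> 1 encoding: the owner field stores the owning thread's id shifted left by one, with bit 0 reserved for internal state (the flag's exact meaning is not visible in this diff, so the constants below are illustrative only):

// Tiny illustration of the tagged-owner encoding implied by
// "cond.mutex->owner >> 1 == ppu.id".
#include <cassert>
#include <cstdint>

int main()
{
	const uint32_t ppu_id = 0x42;
	uint32_t owner = (ppu_id << 1) | 1; // low bit: some internal flag

	assert(owner >> 1 == ppu_id);       // ownership test ignores the flag
	assert((owner & 1) == 1);           // flag bit is carried separately
}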
@@ -200,8 +221,6 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
 		// Verify ownership
 		if (cond->mutex->owner >> 1 != ppu.id)
 		{
-			// Awww
-			cond->waiters--;
 			return CELL_EPERM;
 		}
 		else
@@ -237,61 +256,45 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
 		{
 			if (lv2_obj::wait_timeout(timeout, &ppu))
 			{
+				// Wait for rescheduling
+				if (ppu.check_state())
+				{
+					break;
+				}
+
 				std::lock_guard lock(cond->mutex->mutex);
 
 				// Try to cancel the waiting
 				if (cond->unqueue(cond->sq, &ppu))
 				{
-					ppu.gpr[3] = CELL_ETIMEDOUT;
-					break;
-				}
+					// TODO: Is EBUSY returned after reqeueing, on sys_cond_destroy?
+					cond->waiters--;
 
+					ppu.gpr[3] = CELL_ETIMEDOUT;
+
+					// Own or requeue
+					if (!cond->mutex->try_own(ppu, ppu.id))
+					{
+						cond->mutex->sleep(ppu);
 						timeout = 0;
 						continue;
 					}
 				}
+
+				break;
+			}
+		}
 		else
 		{
 			thread_ctrl::wait();
 		}
 	}
 
-	bool locked_ok = true;
-
-	// Schedule and relock
-	if (ppu.check_state())
-	{
-		return 0;
-	}
-	else if (cond->mutex->try_lock(ppu.id) != CELL_OK)
-	{
-		std::lock_guard lock(cond->mutex->mutex);
-
-		// Own mutex or requeue
-		if (!cond->mutex->try_own(ppu, ppu.id))
-		{
-			locked_ok = false;
-			cond->mutex->sleep(ppu);
-		}
-	}
-
-	while (!locked_ok && !ppu.state.test_and_reset(cpu_flag::signal))
-	{
-		if (ppu.is_stopped())
-		{
-			return 0;
-		}
-
-		thread_ctrl::wait();
-	}
-
 	// Verify ownership
 	verify(HERE), cond->mutex->owner >> 1 == ppu.id;
 
 	// Restore the recursive value
 	cond->mutex->lock_count = cond.ret;
 
-	cond->waiters--;
-
 	return not_an_error(ppu.gpr[3]);
 }