Mirror of https://github.com/RPCS3/rpcs3.git, synced 2025-04-20 11:36:13 +00:00
sys_lwmutex, sys_lwcond refactoring

commit 93db420f80, parent a209d0d7fa

4 changed files with 132 additions and 61 deletions
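Two changes run through every hunk below: the sysPrxForUser wrappers for sys_lwmutex_* and sys_lwcond_* now return error_code instead of a raw s32, with expected non-zero statuses (CELL_EBUSY from a trylock, CELL_ETIMEDOUT from a timed wait) routed through not_an_error() so they are not reported as failures, and a new g_avoid_lwm switch lets every lightweight wrapper fall back to the full lv2 sys_mutex / sys_cond syscalls. The sketch below is not RPCS3's error_code implementation (the real one lives elsewhere in the code base and does proper logging and formatting); it is a minimal standalone model of just the two behaviours the diff relies on, with error values picked for illustration, so the if (error_code res = ...) and not_an_error(...) patterns in the hunks can be read on their own.

#include <cstdint>
#include <cstdio>

using s32 = std::int32_t;

constexpr s32 CELL_OK = 0;
constexpr s32 CELL_EBUSY = static_cast<s32>(0x8001000A); // value picked for the sketch, not authoritative

struct error_code
{
	s32 value;

	// Tag type used by not_an_error() to bypass error reporting.
	enum class not_an_error_tag : s32 {};

	// Constructing from a plain status reports anything that is not CELL_OK.
	error_code(s32 v) : value(v)
	{
		if (v != CELL_OK)
		{
			std::printf("error reported: 0x%08x\n", static_cast<unsigned>(v));
		}
	}

	// Constructing from the tag keeps the value but skips the report.
	error_code(not_an_error_tag v) : value(static_cast<s32>(v)) {}

	// `if (error_code res = ...)` enters the branch only for non-zero statuses.
	explicit operator bool() const { return value != CELL_OK; }

	// Allows comparisons such as `res == CELL_EPERM` and conversion back to s32.
	operator s32() const { return value; }
};

inline error_code not_an_error(s32 v)
{
	return error_code{static_cast<error_code::not_an_error_tag>(v)};
}

// Shaped like the refactored trylock/wait wrappers: a real failure propagates
// (it was already reported when first constructed), while an expected "busy"
// outcome is returned without being treated as an error.
error_code try_lock_model(bool busy)
{
	if (busy)
	{
		return not_an_error(CELL_EBUSY);
	}

	return CELL_OK;
}

int main()
{
	if (error_code res = try_lock_model(true))
	{
		std::printf("trylock returned 0x%08x, nothing was logged as an error\n", static_cast<unsigned>(res.value));
	}
}

Under a model like this one, returning res directly (an error_code) instead of rebuilding a status from a raw integer also avoids reporting the same error twice, which is the practical difference behind the ternary-to-if rewrites in the hunks below.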
@@ -18,11 +18,11 @@ vm::ptr<void> _sys_memset(vm::ptr<void> dst, s32 value, u32 size);
 struct sys_lwmutex_t;
 struct sys_lwmutex_attribute_t;

-s32 sys_lwmutex_create(vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwmutex_attribute_t> attr);
-s32 sys_lwmutex_lock(ppu_thread& CPU, vm::ptr<sys_lwmutex_t> lwmutex, u64 timeout);
-s32 sys_lwmutex_trylock(ppu_thread& CPU, vm::ptr<sys_lwmutex_t> lwmutex);
-s32 sys_lwmutex_unlock(ppu_thread& CPU, vm::ptr<sys_lwmutex_t> lwmutex);
-s32 sys_lwmutex_destroy(ppu_thread& CPU, vm::ptr<sys_lwmutex_t> lwmutex);
+error_code sys_lwmutex_create(vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwmutex_attribute_t> attr);
+error_code sys_lwmutex_lock(ppu_thread& CPU, vm::ptr<sys_lwmutex_t> lwmutex, u64 timeout);
+error_code sys_lwmutex_trylock(ppu_thread& CPU, vm::ptr<sys_lwmutex_t> lwmutex);
+error_code sys_lwmutex_unlock(ppu_thread& CPU, vm::ptr<sys_lwmutex_t> lwmutex);
+error_code sys_lwmutex_destroy(ppu_thread& CPU, vm::ptr<sys_lwmutex_t> lwmutex);

 struct sys_lwmutex_locker
 {
@@ -45,11 +45,11 @@ struct sys_lwmutex_locker
 struct sys_lwcond_t;
 struct sys_lwcond_attribute_t;

-s32 sys_lwcond_create(vm::ptr<sys_lwcond_t> lwcond, vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwcond_attribute_t> attr);
-s32 sys_lwcond_destroy(vm::ptr<sys_lwcond_t> lwcond);
-s32 sys_lwcond_signal(ppu_thread& CPU, vm::ptr<sys_lwcond_t> lwcond);
-s32 sys_lwcond_signal_all(ppu_thread& CPU, vm::ptr<sys_lwcond_t> lwcond);
-s32 sys_lwcond_signal_to(ppu_thread& CPU, vm::ptr<sys_lwcond_t> lwcond, u32 ppu_thread_id);
-s32 sys_lwcond_wait(ppu_thread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout);
+error_code sys_lwcond_create(vm::ptr<sys_lwcond_t> lwcond, vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwcond_attribute_t> attr);
+error_code sys_lwcond_destroy(vm::ptr<sys_lwcond_t> lwcond);
+error_code sys_lwcond_signal(ppu_thread& CPU, vm::ptr<sys_lwcond_t> lwcond);
+error_code sys_lwcond_signal_all(ppu_thread& CPU, vm::ptr<sys_lwcond_t> lwcond);
+error_code sys_lwcond_signal_to(ppu_thread& CPU, vm::ptr<sys_lwcond_t> lwcond, u32 ppu_thread_id);
+error_code sys_lwcond_wait(ppu_thread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout);

 void sys_ppu_thread_exit(ppu_thread& CPU, u64 val);
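The wrapper bodies in the next file lean on the same small C++ idiom in almost every hunk: declaring the result variable inside the if condition, as in if (error_code res = call()). The declared value is contextually converted to bool, so the branch runs only for a non-zero (error) status while res stays in scope for the branch body. A plain-int illustration, nothing RPCS3-specific:

#include <cstdio>

// Stand-in for a syscall wrapper: 0 means success, non-zero is an error status.
int do_work(bool fail)
{
	return fail ? 5 : 0; // 5 is an arbitrary error status for the example
}

int main()
{
	// The variable is declared inside the condition; the branch runs only when
	// its value converts to true (a non-zero status), and `res` stays in scope
	// for the branch body, ready to be logged or returned.
	if (int res = do_work(true))
	{
		std::printf("failed with status %d\n", res);
	}

	if (int res = do_work(false))
	{
		std::printf("never printed: %d\n", res);
	}
}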
@@ -5,17 +5,24 @@

 #include "Emu/Cell/lv2/sys_lwmutex.h"
 #include "Emu/Cell/lv2/sys_lwcond.h"
+#include "Emu/Cell/lv2/sys_mutex.h"
+#include "Emu/Cell/lv2/sys_cond.h"
 #include "sysPrxForUser.h"

 extern logs::channel sysPrxForUser;

-s32 sys_lwcond_create(vm::ptr<sys_lwcond_t> lwcond, vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwcond_attribute_t> attr)
+extern bool g_avoid_lwm = false;
+
+error_code sys_lwcond_create(vm::ptr<sys_lwcond_t> lwcond, vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwcond_attribute_t> attr)
 {
 	sysPrxForUser.trace("sys_lwcond_create(lwcond=*0x%x, lwmutex=*0x%x, attr=*0x%x)", lwcond, lwmutex, attr);

 	vm::var<u32> out_id;
+	vm::var<sys_cond_attribute_t> attrs;
+	attrs->pshared = SYS_SYNC_NOT_PROCESS_SHARED;
+	attrs->name_u64 = attr->name_u64;

-	if (s32 res = _sys_lwcond_create(out_id, lwmutex->sleep_queue, lwcond, attr->name_u64, 0))
+	if (auto res = g_avoid_lwm ? sys_cond_create(out_id, lwmutex->sleep_queue, attrs) : _sys_lwcond_create(out_id, lwmutex->sleep_queue, lwcond, attr->name_u64, 0))
 	{
 		return res;
 	}
@@ -25,11 +32,16 @@ s32 sys_lwcond_create(vm::ptr<sys_lwcond_t> lwcond, vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwcond_attribute_t> attr)
 	return CELL_OK;
 }

-s32 sys_lwcond_destroy(vm::ptr<sys_lwcond_t> lwcond)
+error_code sys_lwcond_destroy(vm::ptr<sys_lwcond_t> lwcond)
 {
 	sysPrxForUser.trace("sys_lwcond_destroy(lwcond=*0x%x)", lwcond);

-	if (s32 res = _sys_lwcond_destroy(lwcond->lwcond_queue))
+	if (g_avoid_lwm)
+	{
+		return sys_cond_destroy(lwcond->lwcond_queue);
+	}
+
+	if (error_code res = _sys_lwcond_destroy(lwcond->lwcond_queue))
 	{
 		return res;
 	}
@@ -38,16 +50,20 @@ s32 sys_lwcond_destroy(vm::ptr<sys_lwcond_t> lwcond)
 	return CELL_OK;
 }

-s32 sys_lwcond_signal(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
+error_code sys_lwcond_signal(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
 {
 	sysPrxForUser.trace("sys_lwcond_signal(lwcond=*0x%x)", lwcond);

+	if (g_avoid_lwm)
+	{
+		return sys_cond_signal(ppu, lwcond->lwcond_queue);
+	}
+
 	const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;

 	if ((lwmutex->attribute & SYS_SYNC_ATTR_PROTOCOL_MASK) == SYS_SYNC_RETRY)
 	{
 		// TODO (protocol ignored)
 		//return _sys_lwcond_signal(lwcond->lwcond_queue, 0, -1, 2);
 		return _sys_lwcond_signal(ppu, lwcond->lwcond_queue, 0, -1, 2);
 	}

 	if (lwmutex->vars.owner.load() == ppu.id)
@@ -56,18 +72,21 @@ s32 sys_lwcond_signal(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
 		lwmutex->all_info++;

 		// call the syscall
-		if (s32 res = _sys_lwcond_signal(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, -1, 1))
+		if (error_code res = _sys_lwcond_signal(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, -1, 1))
 		{
 			ppu.test_state();
 			lwmutex->all_info--;

-			return res == CELL_EPERM ? CELL_OK : res;
+			if (res != CELL_EPERM)
+			{
+				return res;
+			}
 		}

 		return CELL_OK;
 	}

-	if (s32 res = sys_lwmutex_trylock(ppu, lwmutex))
+	if (error_code res = sys_lwmutex_trylock(ppu, lwmutex))
 	{
 		// if locking failed

@@ -84,7 +103,7 @@ s32 sys_lwcond_signal(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
 	lwmutex->all_info++;

 	// call the syscall
-	if (s32 res = _sys_lwcond_signal(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, -1, 3))
+	if (error_code res = _sys_lwcond_signal(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, -1, 3))
 	{
 		ppu.test_state();
 		lwmutex->all_info--;
@@ -92,28 +111,35 @@ s32 sys_lwcond_signal(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
 		// unlock the lightweight mutex
 		sys_lwmutex_unlock(ppu, lwmutex);

-		return res == CELL_ENOENT ? CELL_OK : res;
+		if (res != CELL_ENOENT)
+		{
+			return res;
+		}
 	}

 	return CELL_OK;
 }

-s32 sys_lwcond_signal_all(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
+error_code sys_lwcond_signal_all(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
 {
 	sysPrxForUser.trace("sys_lwcond_signal_all(lwcond=*0x%x)", lwcond);

+	if (g_avoid_lwm)
+	{
+		return sys_cond_signal_all(ppu, lwcond->lwcond_queue);
+	}
+
 	const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;

 	if ((lwmutex->attribute & SYS_SYNC_ATTR_PROTOCOL_MASK) == SYS_SYNC_RETRY)
 	{
 		// TODO (protocol ignored)
 		//return _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 2);
 		return _sys_lwcond_signal_all(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, 2);
 	}

 	if (lwmutex->vars.owner.load() == ppu.id)
 	{
 		// if owns the mutex, call the syscall
-		const s32 res = _sys_lwcond_signal_all(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, 1);
+		const error_code res = _sys_lwcond_signal_all(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, 1);

 		if (res <= 0)
 		{
@@ -127,7 +153,7 @@ s32 sys_lwcond_signal_all(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
 		return CELL_OK;
 	}

-	if (s32 res = sys_lwmutex_trylock(ppu, lwmutex))
+	if (error_code res = sys_lwmutex_trylock(ppu, lwmutex))
 	{
 		// if locking failed

@@ -141,7 +167,7 @@ s32 sys_lwcond_signal_all(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
 	}

 	// if locking succeeded, call the syscall
-	s32 res = _sys_lwcond_signal_all(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, 1);
+	error_code res = _sys_lwcond_signal_all(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, 1);

 	ppu.test_state();

@@ -158,16 +184,20 @@ s32 sys_lwcond_signal_all(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
 	return res;
 }

-s32 sys_lwcond_signal_to(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond, u32 ppu_thread_id)
+error_code sys_lwcond_signal_to(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond, u32 ppu_thread_id)
 {
 	sysPrxForUser.trace("sys_lwcond_signal_to(lwcond=*0x%x, ppu_thread_id=0x%x)", lwcond, ppu_thread_id);

+	if (g_avoid_lwm)
+	{
+		return sys_cond_signal_to(ppu, lwcond->lwcond_queue, ppu_thread_id);
+	}
+
 	const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;

 	if ((lwmutex->attribute & SYS_SYNC_ATTR_PROTOCOL_MASK) == SYS_SYNC_RETRY)
 	{
 		// TODO (protocol ignored)
 		//return _sys_lwcond_signal(lwcond->lwcond_queue, 0, ppu_thread_id, 2);
 		return _sys_lwcond_signal(ppu, lwcond->lwcond_queue, 0, ppu_thread_id, 2);
 	}

 	if (lwmutex->vars.owner.load() == ppu.id)
@@ -176,7 +206,7 @@ s32 sys_lwcond_signal_to(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond, u32 ppu_thread_id)
 		lwmutex->all_info++;

 		// call the syscall
-		if (s32 res = _sys_lwcond_signal(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, ppu_thread_id, 1))
+		if (error_code res = _sys_lwcond_signal(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, ppu_thread_id, 1))
 		{
 			ppu.test_state();
 			lwmutex->all_info--;
@@ -187,7 +217,7 @@ s32 sys_lwcond_signal_to(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond, u32 ppu_thread_id)
 		return CELL_OK;
 	}

-	if (s32 res = sys_lwmutex_trylock(ppu, lwmutex))
+	if (error_code res = sys_lwmutex_trylock(ppu, lwmutex))
 	{
 		// if locking failed

@@ -204,7 +234,7 @@ s32 sys_lwcond_signal_to(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond, u32 ppu_thread_id)
 	lwmutex->all_info++;

 	// call the syscall
-	if (s32 res = _sys_lwcond_signal(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, ppu_thread_id, 3))
+	if (error_code res = _sys_lwcond_signal(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, ppu_thread_id, 3))
 	{
 		ppu.test_state();
 		lwmutex->all_info--;
@@ -218,10 +248,15 @@ s32 sys_lwcond_signal_to(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond, u32 ppu_thread_id)
 	return CELL_OK;
 }

-s32 sys_lwcond_wait(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
+error_code sys_lwcond_wait(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
 {
 	sysPrxForUser.trace("sys_lwcond_wait(lwcond=*0x%x, timeout=0x%llx)", lwcond, timeout);

+	if (g_avoid_lwm)
+	{
+		return sys_cond_wait(ppu, lwcond->lwcond_queue, timeout);
+	}
+
 	const be_t<u32> tid(ppu.id);

 	const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;
@@ -240,7 +275,7 @@ s32 sys_lwcond_wait(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
 	lwmutex->recursive_count = 0;

 	// call the syscall
-	s32 res = _sys_lwcond_queue_wait(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, timeout);
+	const error_code res = _sys_lwcond_queue_wait(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, timeout);

 	if (res == CELL_OK || res == CELL_ESRCH)
 	{
@@ -253,7 +288,7 @@ s32 sys_lwcond_wait(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
 		const auto old = lwmutex->vars.owner.exchange(tid);
 		lwmutex->recursive_count = recursive_value;

-		if (old != lwmutex_reserved)
+		if (old == lwmutex_free || old == lwmutex_dead)
 		{
 			fmt::throw_exception("Locking failed (lwmutex=*0x%x, owner=0x%x)" HERE, lwmutex, old);
 		}
@@ -263,17 +298,20 @@ s32 sys_lwcond_wait(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)

 	if (res == CELL_EBUSY || res == CELL_ETIMEDOUT)
 	{
-		const s32 res2 = sys_lwmutex_lock(ppu, lwmutex, 0);
-
-		if (res2 == CELL_OK)
+		if (error_code res2 = sys_lwmutex_lock(ppu, lwmutex, 0))
 		{
-			// if successfully locked, restore recursive value
-			lwmutex->recursive_count = recursive_value;
-
-			return res == CELL_EBUSY ? CELL_OK : res;
+			return res2;
 		}

-		return res2;
+		// if successfully locked, restore recursive value
+		lwmutex->recursive_count = recursive_value;
+
+		if (res == CELL_EBUSY)
+		{
+			return CELL_OK;
+		}
+
+		return res;
 	}

 	if (res == CELL_EDEADLK)
@@ -282,15 +320,15 @@ s32 sys_lwcond_wait(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
 		const auto old = lwmutex->vars.owner.exchange(tid);
 		lwmutex->recursive_count = recursive_value;

-		if (old != lwmutex_reserved)
+		if (old == lwmutex_free || old == lwmutex_dead)
 		{
 			fmt::throw_exception("Locking failed (lwmutex=*0x%x, owner=0x%x)" HERE, lwmutex, old);
 		}

-		return CELL_ETIMEDOUT;
+		return not_an_error(CELL_ETIMEDOUT);
 	}

-	fmt::throw_exception("Unexpected syscall result (lwcond=*0x%x, result=0x%x)" HERE, lwcond, res);
+	fmt::throw_exception("Unexpected syscall result (lwcond=*0x%x, result=0x%x)" HERE, lwcond, +res);
 }

 void sysPrxForUser_sys_lwcond_init()
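The file above also defines the switch itself: extern bool g_avoid_lwm = false; (an extern declaration with an initializer is a definition, and the lwmutex file below refers to it with a plain extern bool g_avoid_lwm;). When the flag is set, the create wrapper builds a heavyweight lv2 object via sys_cond_create and, judging by the destroy/signal wrappers, its id ends up in the same lwcond->lwcond_queue field that normally holds the lightweight queue id, so every later wrapper only needs the one early branch seen in each hunk. The single-file model below shows that dispatch shape; lightweight_signal, heavyweight_signal and lwcond_model are stand-ins invented for the sketch, not emulator APIs.

#include <cstdint>
#include <cstdio>

using u32 = std::uint32_t;
using s32 = std::int32_t;

// When true, every wrapper below takes the heavyweight fallback.
bool g_avoid_lwm = false;

// Stand-ins invented for this sketch; the real wrappers call the lv2
// syscalls (_sys_lwcond_signal, sys_cond_signal, ...) instead.
s32 lightweight_signal(u32 queue_id)
{
	std::printf("lightweight path, queue id %u\n", static_cast<unsigned>(queue_id));
	return 0;
}

s32 heavyweight_signal(u32 cond_id)
{
	std::printf("heavyweight path, cond id %u\n", static_cast<unsigned>(cond_id));
	return 0;
}

struct lwcond_model
{
	// Holds whichever kernel object id the create wrapper produced, mirroring
	// how the diff reuses lwcond->lwcond_queue for both cases.
	u32 lwcond_queue;
};

// Same shape as the refactored sys_lwcond_signal: decide the back end once,
// then pass the stored id to it.
s32 signal_wrapper(lwcond_model& lwcond)
{
	if (g_avoid_lwm)
	{
		return heavyweight_signal(lwcond.lwcond_queue);
	}

	return lightweight_signal(lwcond.lwcond_queue);
}

int main()
{
	lwcond_model cond{42};

	signal_wrapper(cond); // lightweight path

	g_avoid_lwm = true;
	signal_wrapper(cond); // heavyweight fallback
}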
@@ -5,11 +5,14 @@
 #include "Emu/Cell/PPUModule.h"

 #include "Emu/Cell/lv2/sys_lwmutex.h"
+#include "Emu/Cell/lv2/sys_mutex.h"
 #include "sysPrxForUser.h"

 extern logs::channel sysPrxForUser;

-s32 sys_lwmutex_create(vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwmutex_attribute_t> attr)
+extern bool g_avoid_lwm;
+
+error_code sys_lwmutex_create(vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwmutex_attribute_t> attr)
 {
 	sysPrxForUser.trace("sys_lwmutex_create(lwmutex=*0x%x, attr=*0x%x)", lwmutex, attr);

@@ -32,8 +35,16 @@ s32 sys_lwmutex_create(vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwmutex_attribute_t> attr)
 	}

 	vm::var<u32> out_id;
+	vm::var<sys_mutex_attribute_t> attrs;
+	attrs->protocol = attr->protocol;
+	attrs->recursive = attr->recursive;
+	attrs->pshared = SYS_SYNC_NOT_PROCESS_SHARED;
+	attrs->adaptive = SYS_SYNC_NOT_ADAPTIVE;
+	attrs->ipc_key = 0;
+	attrs->flags = 0;
+	attrs->name_u64 = attr->name_u64;

-	if (s32 res = _sys_lwmutex_create(out_id, protocol, lwmutex, 0x80000001, attr->name_u64, 0))
+	if (error_code res = g_avoid_lwm ? sys_mutex_create(out_id, attrs) : _sys_lwmutex_create(out_id, protocol, lwmutex, 0x80000001, attr->name_u64, 0))
 	{
 		return res;
 	}
@@ -45,10 +56,15 @@ s32 sys_lwmutex_create(vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwmutex_attribute_t> attr)
 	return CELL_OK;
 }

-s32 sys_lwmutex_destroy(ppu_thread& ppu, vm::ptr<sys_lwmutex_t> lwmutex)
+error_code sys_lwmutex_destroy(ppu_thread& ppu, vm::ptr<sys_lwmutex_t> lwmutex)
 {
 	sysPrxForUser.trace("sys_lwmutex_destroy(lwmutex=*0x%x)", lwmutex);

+	if (g_avoid_lwm)
+	{
+		return sys_mutex_destroy(lwmutex->sleep_queue);
+	}
+
 	// check to prevent recursive locking in the next call
 	if (lwmutex->vars.owner.load() == ppu.id)
 	{
@@ -56,13 +72,13 @@ s32 sys_lwmutex_destroy(ppu_thread& ppu, vm::ptr<sys_lwmutex_t> lwmutex)
 	}

 	// attempt to lock the mutex
-	if (s32 res = sys_lwmutex_trylock(ppu, lwmutex))
+	if (error_code res = sys_lwmutex_trylock(ppu, lwmutex))
 	{
 		return res;
 	}

 	// call the syscall
-	if (s32 res = _sys_lwmutex_destroy(lwmutex->sleep_queue))
+	if (error_code res = _sys_lwmutex_destroy(lwmutex->sleep_queue))
 	{
 		// unlock the mutex if failed
 		sys_lwmutex_unlock(ppu, lwmutex);
@@ -76,10 +92,15 @@ s32 sys_lwmutex_destroy(ppu_thread& ppu, vm::ptr<sys_lwmutex_t> lwmutex)
 	return CELL_OK;
 }

-s32 sys_lwmutex_lock(ppu_thread& ppu, vm::ptr<sys_lwmutex_t> lwmutex, u64 timeout)
+error_code sys_lwmutex_lock(ppu_thread& ppu, vm::ptr<sys_lwmutex_t> lwmutex, u64 timeout)
 {
 	sysPrxForUser.trace("sys_lwmutex_lock(lwmutex=*0x%x, timeout=0x%llx)", lwmutex, timeout);

+	if (g_avoid_lwm)
+	{
+		return sys_mutex_lock(ppu, lwmutex->sleep_queue, timeout);
+	}
+
 	const be_t<u32> tid(ppu.id);

 	// try to lock lightweight mutex
@@ -146,7 +167,7 @@ s32 sys_lwmutex_lock(ppu_thread& ppu, vm::ptr<sys_lwmutex_t> lwmutex, u64 timeout)
 	}

 	// lock using the syscall
-	const s32 res = _sys_lwmutex_lock(ppu, lwmutex->sleep_queue, timeout);
+	const error_code res = _sys_lwmutex_lock(ppu, lwmutex->sleep_queue, timeout);

 	lwmutex->all_info--;

@@ -172,10 +193,15 @@ s32 sys_lwmutex_lock(ppu_thread& ppu, vm::ptr<sys_lwmutex_t> lwmutex, u64 timeout)
 	return res;
 }

-s32 sys_lwmutex_trylock(ppu_thread& ppu, vm::ptr<sys_lwmutex_t> lwmutex)
+error_code sys_lwmutex_trylock(ppu_thread& ppu, vm::ptr<sys_lwmutex_t> lwmutex)
 {
 	sysPrxForUser.trace("sys_lwmutex_trylock(lwmutex=*0x%x)", lwmutex);

+	if (g_avoid_lwm)
+	{
+		return sys_mutex_trylock(ppu, lwmutex->sleep_queue);
+	}
+
 	const be_t<u32> tid(ppu.id);

 	// try to lock lightweight mutex
@@ -219,7 +245,7 @@ s32 sys_lwmutex_trylock(ppu_thread& ppu, vm::ptr<sys_lwmutex_t> lwmutex)
 	if (old_owner == lwmutex_reserved)
 	{
 		// should be locked by the syscall
-		const s32 res = _sys_lwmutex_trylock(lwmutex->sleep_queue);
+		const error_code res = _sys_lwmutex_trylock(lwmutex->sleep_queue);

 		if (res == CELL_OK)
 		{
@@ -236,13 +262,18 @@ s32 sys_lwmutex_trylock(ppu_thread& ppu, vm::ptr<sys_lwmutex_t> lwmutex)
 	}

 	// locked by another thread
-	return CELL_EBUSY;
+	return not_an_error(CELL_EBUSY);
 }

-s32 sys_lwmutex_unlock(ppu_thread& ppu, vm::ptr<sys_lwmutex_t> lwmutex)
+error_code sys_lwmutex_unlock(ppu_thread& ppu, vm::ptr<sys_lwmutex_t> lwmutex)
 {
 	sysPrxForUser.trace("sys_lwmutex_unlock(lwmutex=*0x%x)", lwmutex);

+	if (g_avoid_lwm)
+	{
+		return sys_mutex_unlock(ppu, lwmutex->sleep_queue);
+	}
+
 	const be_t<u32> tid(ppu.id);

 	// check owner
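The lwmutex create wrapper above is the one place the fallback has to translate attributes: sys_lwmutex_attribute_t only carries protocol, recursive and a name, so the remaining sys_mutex_attribute_t fields are pinned to process-local, non-adaptive defaults with no IPC key or flags. The sketch below restates that mapping as a standalone function; the struct definitions and constant values are simplified stand-ins for illustration (the real types are vm-resident, big-endian structures defined by the emulator).

#include <cstdint>

using u32 = std::uint32_t;
using u64 = std::uint64_t;

// Illustrative values only; the real constants come from the lv2 sync headers.
constexpr u32 SYS_SYNC_NOT_PROCESS_SHARED = 0x200;
constexpr u32 SYS_SYNC_NOT_ADAPTIVE = 0x2000;

// Simplified stand-ins for the attribute structs; field names follow the diff.
struct lwmutex_attribute_model
{
	u32 protocol;
	u32 recursive;
	u64 name_u64;
};

struct mutex_attribute_model
{
	u32 protocol;
	u32 recursive;
	u32 pshared;
	u32 adaptive;
	u64 ipc_key;
	u32 flags;
	u64 name_u64;
};

// Mirrors the assignments in sys_lwmutex_create: carry over what the caller
// specified, pin everything a lightweight mutex cannot express.
mutex_attribute_model to_mutex_attribute(const lwmutex_attribute_model& attr)
{
	mutex_attribute_model attrs{};
	attrs.protocol = attr.protocol;
	attrs.recursive = attr.recursive;
	attrs.pshared = SYS_SYNC_NOT_PROCESS_SHARED;
	attrs.adaptive = SYS_SYNC_NOT_ADAPTIVE;
	attrs.ipc_key = 0;
	attrs.flags = 0;
	attrs.name_u64 = attr.name_u64;
	return attrs;
}

int main()
{
	const lwmutex_attribute_model lw_attr{2, 1, 0x314159}; // arbitrary sample values
	const mutex_attribute_model m_attr = to_mutex_attribute(lw_attr);
	return m_attr.pshared == SYS_SYNC_NOT_PROCESS_SHARED ? 0 : 1;
}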
@@ -802,13 +802,15 @@ s32 error_code::error_report(const fmt_type_info* sup, u64 arg)
 	{
 	case CELL_ESRCH:
 	case CELL_EDEADLK:
+	case CELL_EPERM:
 	{
 		if (ppu.m_name == "_cellsurMixerMain" || ppu.m_name == "_sys_MixerChStripMain")
 		{
 			if (std::memcmp(ppu.last_function, "sys_mutex_lock", 15) == 0 ||
 				std::memcmp(ppu.last_function, "sys_lwmutex_lock", 17) == 0 ||
 				std::memcmp(ppu.last_function, "_sys_mutex_lock", 16) == 0 ||
-				std::memcmp(ppu.last_function, "_sys_lwmutex_lock", 18) == 0)
+				std::memcmp(ppu.last_function, "_sys_lwmutex_lock", 18) == 0 ||
+				std::memcmp(ppu.last_function, "sys_lwmutex_unlock", 19) == 0)
 			{
 				level = logs::level::trace;
 			}
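A detail worth noting in the hunk above: each memcmp length is the literal's length plus one, so the comparison covers the terminating NUL and only matches when ppu.last_function is exactly that name (for example "sys_lwmutex_unlock" is 18 characters, compared over 19 bytes). A small standalone check, with a deliberately hypothetical longer name to show the difference:

#include <cstring>
#include <cstdio>

int main()
{
	const char* last_function = "sys_lwmutex_unlock_ex"; // hypothetical longer name
	const char* exact = "sys_lwmutex_unlock";

	// Comparing strlen("sys_lwmutex_unlock") + 1 == 19 bytes pulls in the
	// literal's terminating NUL, so a longer name sharing the prefix no longer
	// matches, while the exact name still does.
	std::printf("prefix-only compare: %d\n", std::memcmp(last_function, "sys_lwmutex_unlock", 18) == 0);
	std::printf("exact compare:       %d\n", std::memcmp(last_function, "sys_lwmutex_unlock", 19) == 0);
	std::printf("exact vs exact:      %d\n", std::memcmp(exact, "sys_lwmutex_unlock", 19) == 0);
}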